--- /dev/null
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Compiled Dynamic libraries
+*.so
+*.dylib
+*.dll
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
+
+# Autotools
+/ac
+Makefile
+Makefile.in
+/aclocal.m4
+/autom4te.cache/
+/config.*
+/configure
+/depcomp
+/install-sh
+/libtool
+/ltmain.sh
+/m4/
+/missing
+/stamp-h?
+.deps/
+.dirstamp
+.libs/
+*.l[ao]
+*~
+*.pc
+
+# KDevelop
+.kdev4/
+*.kdev4
+
+# Python
+*.pyc
+*.stamp
+python/opendht.cpp
+python/setup.py
+
+# Doxygen
+doc/Doxyfile
+
+# git backup files
+*.orig
+
+# vim swap files
+*.swp
+*.swo
+
+# build dir
+build
--- /dev/null
+[submodule "argon2"]
+ path = argon2
+ url = https://github.com/P-H-C/phc-winner-argon2
+ ignore = dirty
--- /dev/null
+dist: xenial
+sudo: required
+
+services:
+ - docker
+
+language: cpp
+
+env:
+ matrix:
+ - OPENDHT_TEST_JOB="opendht.classic"
+ - OPENDHT_TEST_JOB="opendht.llvm"
+ - OPENDHT_TEST_JOB="opendht.proxyserver"
+ - OPENDHT_TEST_JOB="opendht.proxyclient"
+ - OPENDHT_TEST_JOB="opendht.push"
+
+before_install:
+ - |
+ # non llvm builds
+ if [[ "$OPENDHT_TEST_JOB" != *"opendht.llvm"* ]]; then
+ docker pull aberaud/opendht-deps;
+ fi
+
+ - |
+    # llvm build
+ if [[ "$OPENDHT_TEST_JOB" == *"opendht.llvm"* ]]; then
+ docker pull aberaud/opendht-deps-llvm
+ fi
+
+script:
+ - |
+ # classic build
+ if [[ "$OPENDHT_TEST_JOB" == *"opendht.classic"* ]]; then
+ docker build -t opendht -f docker/DockerfileTravis .;
+ fi
+
+ - |
+ # proxy builds
+ if [[ "$OPENDHT_TEST_JOB" != *"opendht.llvm"* ]] && [[ "$OPENDHT_TEST_JOB" != *"opendht.classic"* ]]; then
+ docker build -t opendht-proxy -f docker/DockerfileTravisProxy .;
+ options='-DOPENDHT_SANITIZE=On ';
+ if [[ "$OPENDHT_TEST_JOB" == *"opendht.proxyserver"* ]] || [[ "$OPENDHT_TEST_JOB" == *"opendht.push"* ]]; then
+ options+='-DOPENDHT_PROXY_SERVER=ON ';
+ else
+ options+='-DOPENDHT_PROXY_SERVER=OFF ';
+ fi
+ if [[ "$OPENDHT_TEST_JOB" == *"opendht.proxyclient"* ]] || [[ "$OPENDHT_TEST_JOB" == *"opendht.push"* ]]; then
+ options+='-DOPENDHT_PROXY_CLIENT=ON ';
+ else
+ options+='-DOPENDHT_PROXY_CLIENT=OFF ';
+ fi
+ if [[ "$OPENDHT_TEST_JOB" == *"opendht.push"* ]]; then
+ options+='-DOPENDHT_PUSH_NOTIFICATIONS=ON ';
+ else
+ options+='-DOPENDHT_PUSH_NOTIFICATIONS=OFF ';
+ fi
+ docker run opendht-proxy /bin/sh -c "cd /root/opendht && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=/usr -DOPENDHT_PYTHON=ON -DOPENDHT_LTO=ON -DOPENDHT_TESTS=ON $options .. && make -j8 && ./opendht_unit_tests && make install";
+ fi
+
+ - |
+ # llvm build
+ if [[ "$OPENDHT_TEST_JOB" == *"opendht.llvm"* ]]; then
+ docker build -f docker/DockerfileTravisLlvm .
+ fi
+
+notifications:
+ email:
+ - adrien.beraud@savoirfairelinux.com
--- /dev/null
+# OpenDHT top-level build script.
+# NOTE(review): 3.1 is old; a version range (e.g. 3.1...3.16) would opt in to
+# newer policies once a newer minimum is acceptable — confirm before raising.
+cmake_minimum_required (VERSION 3.1)
+project (opendht)
+# Project version (1.8.1); PACKAGE_VERSION/VERSION are substituted into the
+# pkg-config file generated from opendht.pc.in.
+set (opendht_VERSION_MAJOR 1)
+set (opendht_VERSION_MINOR 8.1)
+set (opendht_VERSION ${opendht_VERSION_MAJOR}.${opendht_VERSION_MINOR})
+set (PACKAGE_VERSION ${opendht_VERSION})
+set (VERSION "${opendht_VERSION}")
+
+# Options
+option (OPENDHT_STATIC "Build static library" ON)
+option (OPENDHT_SHARED "Build shared library" ON)
+option (OPENDHT_LOG "Build with logs" ON)
+option (OPENDHT_PYTHON "Build Python bindings" OFF)
+option (OPENDHT_TOOLS "Build DHT tools" ON)
+option (OPENDHT_SYSTEMD "Install systemd module" OFF)
+option (OPENDHT_ARGON2 "Use included argon2 sources" OFF)
+option (OPENDHT_LTO "Build with LTO" OFF)
+option (OPENDHT_SANITIZE "Build with address sanitizer and stack protector" OFF)
+option (OPENDHT_PROXY_SERVER "Enable DHT proxy server, use Restbed and jsoncpp" OFF)
+option (OPENDHT_PUSH_NOTIFICATIONS "Enable push notifications support" OFF)
+option (OPENDHT_PROXY_SERVER_IDENTITY "Allow clients to use the node identity" OFF)
+option (OPENDHT_PROXY_CLIENT "Enable DHT proxy client, use Restbed and jsoncpp" OFF)
+option (OPENDHT_INDEX "Build DHT indexation feature" OFF)
+option (OPENDHT_TESTS "Add unit tests executable" OFF)
+
+# Documentation defaults to ON only when Doxygen was found on this machine.
+find_package(Doxygen)
+option (OPENDHT_DOCUMENTATION "Create and install the HTML based API documentation (requires Doxygen)" ${DOXYGEN_FOUND})
+
+# Dependencies
+# Custom find modules (FindMsgpack, FindReadline, FindRestbed, FindCppunit...)
+# live in the project's cmake/ directory.
+list (APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
+find_package (Threads)
+find_package (PkgConfig)
+find_package (GnuTLS 3.3 REQUIRED)
+pkg_search_module (Nettle nettle)
+find_package (Msgpack 1.2 REQUIRED)
+if (OPENDHT_TOOLS)
+    find_package (Readline 6 REQUIRED)
+endif ()
+# Prefer a system libargon2; fall back to the bundled submodule when absent.
+# (PkgConfig is already loaded above; the redundant find_package was removed.)
+if (NOT OPENDHT_ARGON2)
+    pkg_search_module(argon2 libargon2)
+    if (argon2_FOUND)
+        # Fix: the message previously expanded the never-set ${argon2_req},
+        # printing nothing useful; argon2_VERSION is set by pkg_search_module.
+        message("Argon2 found " ${argon2_VERSION})
+        # Extra Requires entry advertised through the generated opendht.pc.
+        set(argon2_lib ", libargon2")
+    else ()
+        message("Argon2 not found, using included version.")
+        set(OPENDHT_ARGON2 ON)
+    endif()
+endif ()
+
+# Jsoncpp is optional here (enables JSON support and the base64 helpers it
+# needs) but becomes mandatory when any proxy feature is requested below.
+pkg_search_module(Jsoncpp jsoncpp)
+if (Jsoncpp_FOUND)
+    add_definitions(-DOPENDHT_JSONCPP)
+    list (APPEND opendht_SOURCES
+        src/base64.h
+        src/base64.cpp
+    )
+endif()
+
+if (OPENDHT_PROXY_SERVER OR OPENDHT_PROXY_CLIENT)
+    find_package(Restbed REQUIRED)
+    if (NOT Jsoncpp_FOUND)
+        message(SEND_ERROR "Jsoncpp is required for DHT proxy support")
+    endif()
+endif()
+
+# Build flags
+# NOTE(review): everything below mutates global CMAKE_CXX_FLAGS /
+# include_directories state, so it applies to every target declared later in
+# this file; per-target target_compile_options would be the modern form.
+set (CMAKE_CXX_STANDARD 11)
+set (CMAKE_CXX_STANDARD_REQUIRED on)
+set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-return-type -Wall -Wextra -Wnon-virtual-dtor -pedantic-errors -fvisibility=hidden")
+if (OPENDHT_SANITIZE)
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fstack-protector-strong")
+endif ()
+# Opt out of msgpack's legacy nil/convert behavior at compile time.
+set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMSGPACK_DISABLE_LEGACY_NIL -DMSGPACK_DISABLE_LEGACY_CONVERT")
+# Default to an optimized build when the user did not pick a configuration.
+# NOTE(review): CMAKE_BUILD_TYPE is meaningless under multi-config generators.
+if (NOT CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE Release)
+endif ()
+# OPENDHT_LOG is defined to a boolean literal either way, never left undefined.
+if (OPENDHT_LOG)
+    add_definitions(-DOPENDHT_LOG=true)
+else ()
+    add_definitions(-DOPENDHT_LOG=false)
+endif()
+if (OPENDHT_LTO)
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto")
+    # GCC needs its wrapper binutils so LTO bytecode survives archiving.
+    if (CMAKE_COMPILER_IS_GNUCC)
+        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fuse-linker-plugin")
+        set (CMAKE_AR "gcc-ar")
+        set (CMAKE_NM "gcc-nm")
+        set (CMAKE_RANLIB "gcc-ranlib")
+    endif ()
+endif ()
+
+# Third-party headers are added as SYSTEM so their warnings are suppressed.
+if (MSGPACK_INCLUDE_DIRS)
+    include_directories (SYSTEM "${MSGPACK_INCLUDE_DIRS}")
+endif ()
+if (GNUTLS_INCLUDE_DIRS)
+    include_directories (SYSTEM "${GNUTLS_INCLUDE_DIRS}")
+endif ()
+if (Nettle_INCLUDE_DIRS)
+    include_directories (SYSTEM "${Nettle_INCLUDE_DIRS}")
+endif ()
+if (Restbed_INCLUDE_DIR)
+    include_directories (SYSTEM "${Restbed_INCLUDE_DIR}")
+endif ()
+if (Jsoncpp_INCLUDE_DIRS)
+    include_directories (SYSTEM "${Jsoncpp_INCLUDE_DIRS}")
+endif ()
+link_directories (${Nettle_LIBRARY_DIRS})
+link_directories (${Jsoncpp_LIBRARY_DIRS})
+# Project-local include paths (plus the binary dir for generated headers).
+include_directories (
+    ./
+    include/
+    include/opendht/
+    ${CMAKE_CURRENT_BINARY_DIR}/include/
+)
+
+# Install dirs
+include (GNUInstallDirs)
+# These variables are consumed by configure_file(opendht.pc.in ...) further
+# down to produce the installed pkg-config file. The \${...} escapes emit
+# literal ${prefix}/${exec_prefix} references into the .pc file, which
+# pkg-config itself expands at query time.
+set (prefix ${CMAKE_INSTALL_PREFIX})
+set (exec_prefix "\${prefix}")
+set (libdir "\${exec_prefix}/${CMAKE_INSTALL_LIBDIR}")
+set (includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
+set (bindir "${CMAKE_INSTALL_FULL_BINDIR}")
+set (sysconfdir "${CMAKE_INSTALL_FULL_SYSCONFDIR}")
+set (top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}")
+
+# Sources
+# Private headers (src/*.h) are listed with the sources so IDE generators
+# display them; they do not change what gets compiled.
+list (APPEND opendht_SOURCES
+    src/utils.cpp
+    src/infohash.cpp
+    src/crypto.cpp
+    src/default_types.cpp
+    src/node.cpp
+    src/value.cpp
+    src/dht.cpp
+    src/op_cache.cpp
+    src/storage.h
+    src/listener.h
+    src/search.h
+    src/value_cache.h
+    src/op_cache.h
+    src/net.h
+    src/parsed_message.h
+    src/request.h
+    src/callbacks.cpp
+    src/routing_table.cpp
+    src/node_cache.cpp
+    src/network_engine.cpp
+    src/securedht.cpp
+    src/dhtrunner.cpp
+    src/log.cpp
+)
+
+# Public API headers, installed via the `install (DIRECTORY include ...)` rule.
+list (APPEND opendht_HEADERS
+    include/opendht/def.h
+    include/opendht/utils.h
+    include/opendht/sockaddr.h
+    include/opendht/rng.h
+    include/opendht/crypto.h
+    include/opendht/infohash.h
+    include/opendht/default_types.h
+    include/opendht/node.h
+    include/opendht/value.h
+    include/opendht/dht.h
+    include/opendht/dht_interface.h
+    include/opendht/callbacks.h
+    include/opendht/routing_table.h
+    include/opendht/node_cache.h
+    include/opendht/network_engine.h
+    include/opendht/scheduler.h
+    include/opendht/rate_limiter.h
+    include/opendht/securedht.h
+    include/opendht/log.h
+    include/opendht/log_enable.h
+    include/opendht.h
+)
+
+# Optional indexation feature (pht sources) gated by OPENDHT_INDEX.
+if (OPENDHT_INDEX)
+    list (APPEND opendht_SOURCES src/indexation/pht.cpp)
+    list (APPEND opendht_HEADERS include/opendht/indexation/pht.h)
+    add_definitions(-DOPENDHT_INDEXATION)
+endif()
+
+# Proxy server: each feature macro is defined to an explicit true/false so the
+# C++ sources can test its value rather than its mere presence.
+if (OPENDHT_PROXY_SERVER)
+    add_definitions(-DOPENDHT_PROXY_SERVER=true)
+    if (OPENDHT_PROXY_SERVER_IDENTITY)
+        add_definitions(-DOPENDHT_PROXY_SERVER_IDENTITY=true)
+    else ()
+        add_definitions(-DOPENDHT_PROXY_SERVER_IDENTITY=false)
+    endif()
+    list (APPEND opendht_HEADERS
+        include/opendht/dht_proxy_server.h
+    )
+    list (APPEND opendht_SOURCES
+        src/dht_proxy_server.cpp
+    )
+else ()
+    # Fix: this branch previously defined ENABLE_PROXY_SERVER=false, which no
+    # other branch uses, leaving OPENDHT_PROXY_SERVER undefined when disabled;
+    # define the same macro as the enabled branch, mirroring the client case.
+    add_definitions(-DOPENDHT_PROXY_SERVER=false)
+endif ()
+
+# Proxy client: same explicit true/false convention.
+if (OPENDHT_PROXY_CLIENT)
+    add_definitions(-DOPENDHT_PROXY_CLIENT=true)
+    list (APPEND opendht_HEADERS
+        include/opendht/dht_proxy_client.h
+    )
+    list (APPEND opendht_SOURCES
+        src/dht_proxy_client.cpp
+    )
+else ()
+    add_definitions(-DOPENDHT_PROXY_CLIENT=false)
+endif ()
+
+# Push notifications are only meaningful when some proxy support is built.
+if (OPENDHT_PROXY_SERVER OR OPENDHT_PROXY_CLIENT)
+    if (OPENDHT_PUSH_NOTIFICATIONS)
+        message("Using push notification")
+        add_definitions(-DOPENDHT_PUSH_NOTIFICATIONS=true)
+    else ()
+        add_definitions(-DOPENDHT_PUSH_NOTIFICATIONS=false)
+    endif ()
+    list (APPEND opendht_HEADERS
+        include/opendht/proxy.h
+    )
+endif ()
+
+if(OPENDHT_ARGON2)
+    # make sure argon2 submodule is up to date and initialized
+    # NOTE(review): configure-time side effect — this shells out to git in the
+    # current directory and ignores failures (e.g. tarball builds without a
+    # .git); presumably intentional best-effort, confirm.
+    message("Initializing Argon2 submodule")
+    execute_process(COMMAND git submodule update --init)
+
+    # add local argon2 files to build (compiled directly into libopendht
+    # instead of linking a system libargon2)
+    list (APPEND opendht_SOURCES
+        argon2/src/argon2.c
+        argon2/src/core.c
+        argon2/src/blake2/blake2b.c
+        argon2/src/thread.c
+        argon2/src/ref.c
+        argon2/src/encoding.c
+    )
+    include_directories(argon2/include/)
+endif()
+
+# Targets
+# Static and shared libraries are built from the same source/header lists and
+# can be enabled independently; both install into CMAKE_INSTALL_LIBDIR and
+# join the "opendht" export set.
+if (OPENDHT_STATIC)
+    add_library (opendht-static STATIC
+        ${opendht_SOURCES}
+        ${opendht_HEADERS}
+    )
+    # Drop the "-static" suffix from the archive name (libopendht.a).
+    set_target_properties (opendht-static PROPERTIES OUTPUT_NAME "opendht")
+    if (OPENDHT_ARGON2)
+        target_include_directories(opendht-static SYSTEM PRIVATE argon2)
+    else ()
+        target_include_directories(opendht-static SYSTEM PRIVATE ${argon2_INCLUDE_DIRS})
+    endif ()
+    # PUBLIC deps propagate to consumers of the exported target; Restbed and
+    # argon2 stay PRIVATE (implementation details).
+    target_link_libraries(opendht-static
+        PRIVATE ${Restbed_LIBRARY} ${argon2_LIBRARIES}
+        PUBLIC ${CMAKE_THREAD_LIBS_INIT} ${GNUTLS_LIBRARIES} ${Nettle_LIBRARIES} ${Jsoncpp_LIBRARIES})
+    install (TARGETS opendht-static DESTINATION ${CMAKE_INSTALL_LIBDIR} EXPORT opendht)
+endif ()
+
+if (OPENDHT_SHARED)
+    add_library (opendht SHARED
+        ${opendht_SOURCES}
+        ${opendht_HEADERS}
+    )
+    # IMPORT_SUFFIX only affects the Windows import library name.
+    set_target_properties (opendht PROPERTIES IMPORT_SUFFIX "_import.lib")
+    set_target_properties (opendht PROPERTIES SOVERSION ${opendht_VERSION_MAJOR} VERSION ${opendht_VERSION})
+    # OPENDHT_BUILD — presumably drives export/visibility macros for the
+    # shared build; confirm against include/opendht/def.h.
+    target_compile_definitions(opendht PRIVATE OPENDHT_BUILD)
+    if (OPENDHT_ARGON2)
+        target_include_directories(opendht SYSTEM PRIVATE argon2)
+    else ()
+        target_link_libraries(opendht PRIVATE ${argon2_LIBRARIES})
+        target_include_directories(opendht SYSTEM PRIVATE ${argon2_INCLUDE_DIRS})
+    endif ()
+    target_link_libraries(opendht PRIVATE ${CMAKE_THREAD_LIBS_INIT} ${GNUTLS_LIBRARIES} ${Nettle_LIBRARIES} ${Restbed_LIBRARY} ${Jsoncpp_LIBRARIES})
+    install (TARGETS opendht DESTINATION ${CMAKE_INSTALL_LIBDIR} EXPORT opendht)
+endif ()
+
+if (OPENDHT_TOOLS)
+    add_subdirectory(tools)
+endif ()
+add_subdirectory(doc)
+
+if (OPENDHT_PYTHON)
+    add_subdirectory(python)
+endif ()
+
+# CMake module
+# Version file lets downstream find_package(opendht x.y) do compatibility
+# checks against the installed package.
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file("${CMAKE_CURRENT_BINARY_DIR}/opendhtConfigVersion.cmake"
+    VERSION ${opendht_VERSION}
+    COMPATIBILITY AnyNewerVersion
+)
+# PkgConfig module
+# @ONLY restricts substitution to @VAR@ markers so literal ${...} in the
+# template (pkg-config syntax) survives untouched.
+configure_file (
+    opendht.pc.in
+    opendht.pc
+    @ONLY
+)
+
+# Install targets
+# The EXPORT rule materializes opendhtConfig.cmake from the "opendht" export
+# set populated by the library install rules above.
+install (DIRECTORY include DESTINATION ${CMAKE_INSTALL_PREFIX})
+install (FILES ${CMAKE_CURRENT_BINARY_DIR}/opendht.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
+install (EXPORT opendht DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/opendht FILE opendhtConfig.cmake)
+install (FILES ${CMAKE_CURRENT_BINARY_DIR}/opendhtConfigVersion.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/opendht)
+
+# Unit tests (commands lowercased to match the style of the rest of the file).
+if (OPENDHT_TESTS)
+    find_package (Cppunit REQUIRED)
+    # unit testing
+    list (APPEND test_FILES
+        tests/infohashtester.h
+        tests/infohashtester.cpp
+        tests/cryptotester.h
+        tests/cryptotester.cpp
+        tests/dhtrunnertester.h
+        tests/dhtrunnertester.cpp
+    )
+    # Proxy tests need both ends of the proxy to be compiled in.
+    if (OPENDHT_PROXY_SERVER AND OPENDHT_PROXY_CLIENT)
+        list (APPEND test_FILES
+            tests/dhtproxytester.h
+            tests/dhtproxytester.cpp
+        )
+    endif()
+    add_executable(opendht_unit_tests
+        tests/tests_runner.cpp
+        ${test_FILES}
+    )
+    # Link whichever flavor of the library is enabled (PRIVATE throughout:
+    # mixing keyword and plain signatures on one target is an error).
+    if (OPENDHT_SHARED)
+        target_link_libraries(opendht_unit_tests PRIVATE opendht)
+    else ()
+        target_link_libraries(opendht_unit_tests PRIVATE opendht-static)
+    endif ()
+    target_link_libraries(opendht_unit_tests PRIVATE
+        ${CMAKE_THREAD_LIBS_INIT}
+        ${CPPUNIT_LIBRARIES}
+        ${GNUTLS_LIBRARIES}
+    )
+    enable_testing()
+    # Fix: add_test(TEST opendht_unit_tests) hit the legacy signature and
+    # registered a test literally named "TEST"; the NAME/COMMAND form runs the
+    # target binary (CMake expands the target name to its file location).
+    add_test(NAME opendht_unit_tests COMMAND opendht_unit_tests)
+endif()
--- /dev/null
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
--- /dev/null
+## Ignore Visual Studio temporary files, build results, and\r
+## files generated by popular Visual Studio add-ons.\r
+\r
+#output directories\r
+DebugLib/\r
+Debug/\r
+ReleaseLib/\r
+Release/\r
+.config/\r
+# User-specific files\r
+*.suo\r
+*.user\r
+*.userosscache\r
+*.sln.docstates\r
+*.VC.opendb
+*.VC.db\r
+*.db\r
+\r
+# User-specific files (MonoDevelop/Xamarin Studio)\r
+*.userprefs\r
+\r
+# Build results\r
+[Dd]ebug/\r
+[Dd]ebugPublic/\r
+[Rr]elease/\r
+[Rr]eleases/\r
+x64/\r
+x86/\r
+bld/\r
+[Bb]in/\r
+[Oo]bj/\r
+\r
+# Visual Studio 2015 cache/options directory\r
+.vs/\r
+# Uncomment if you have tasks that create the project's static files in wwwroot\r
+#wwwroot/\r
+\r
+# MSTest test Results\r
+[Tt]est[Rr]esult*/\r
+[Bb]uild[Ll]og.*\r
+\r
+# NUNIT\r
+*.VisualState.xml\r
+TestResult.xml\r
+\r
+# Build Results of an ATL Project\r
+[Dd]ebugPS/\r
+[Rr]eleasePS/\r
+dlldata.c\r
+\r
+# DNX\r
+project.lock.json\r
+artifacts/\r
+\r
+*_i.c\r
+*_p.c\r
+*_i.h\r
+*.ilk\r
+*.meta\r
+*.obj\r
+*.pch\r
+*.pdb\r
+*.pgc\r
+*.pgd\r
+*.rsp\r
+*.sbr\r
+*.tlb\r
+*.tli\r
+*.tlh\r
+*.tmp\r
+*.tmp_proj\r
+*.log\r
+*.vspscc\r
+*.vssscc\r
+.builds\r
+*.pidb\r
+*.svclog\r
+*.scc\r
+\r
+# Chutzpah Test files\r
+_Chutzpah*\r
+\r
+# Visual C++ cache files\r
+ipch/\r
+*.aps\r
+*.ncb\r
+*.opendb\r
+*.opensdf\r
+*.sdf\r
+*.cachefile\r
+\r
+# Visual Studio profiler\r
+*.psess\r
+*.vsp\r
+*.vspx\r
+*.sap\r
+\r
+# TFS 2012 Local Workspace\r
+$tf/\r
+\r
+# Guidance Automation Toolkit\r
+*.gpState\r
+\r
+# ReSharper is a .NET coding add-in\r
+_ReSharper*/\r
+*.[Rr]e[Ss]harper\r
+*.DotSettings.user\r
+\r
+# JustCode is a .NET coding add-in\r
+.JustCode\r
+\r
+# TeamCity is a build add-in\r
+_TeamCity*\r
+\r
+# DotCover is a Code Coverage Tool\r
+*.dotCover\r
+\r
+# NCrunch\r
+_NCrunch_*\r
+.*crunch*.local.xml\r
+nCrunchTemp_*\r
+\r
+# MightyMoose\r
+*.mm.*\r
+AutoTest.Net/\r
+\r
+# Web workbench (sass)\r
+.sass-cache/\r
+\r
+# Installshield output folder\r
+[Ee]xpress/\r
+\r
+# DocProject is a documentation generator add-in\r
+DocProject/buildhelp/\r
+DocProject/Help/*.HxT\r
+DocProject/Help/*.HxC\r
+DocProject/Help/*.hhc\r
+DocProject/Help/*.hhk\r
+DocProject/Help/*.hhp\r
+DocProject/Help/Html2\r
+DocProject/Help/html\r
+\r
+# Click-Once directory\r
+publish/\r
+\r
+# Publish Web Output\r
+*.[Pp]ublish.xml\r
+*.azurePubxml\r
+# TODO: Comment the next line if you want to checkin your web deploy settings \r
+# but database connection strings (with potential passwords) will be unencrypted\r
+*.pubxml\r
+*.publishproj\r
+\r
+# NuGet Packages\r
+*.nupkg\r
+# The packages folder can be ignored because of Package Restore\r
+**/packages/*\r
+# except build/, which is used as an MSBuild target.\r
+!**/packages/build/\r
+# Uncomment if necessary however generally it will be regenerated when needed\r
+#!**/packages/repositories.config\r
+# NuGet v3's project.json files produce more ignorable files
+*.nuget.props\r
+*.nuget.targets\r
+\r
+# Microsoft Azure Build Output\r
+csx/\r
+*.build.csdef\r
+\r
+# Microsoft Azure Emulator\r
+ecf/\r
+rcf/\r
+\r
+# Microsoft Azure ApplicationInsights config file\r
+ApplicationInsights.config\r
+\r
+# Windows Store app package directory\r
+AppPackages/\r
+BundleArtifacts/\r
+\r
+# Visual Studio cache files\r
+# files ending in .cache can be ignored\r
+*.[Cc]ache\r
+# but keep track of directories ending in .cache\r
+!*.[Cc]ache/\r
+\r
+# Others\r
+ClientBin/\r
+~$*\r
+*~\r
+*.dbmdl\r
+*.dbproj.schemaview\r
+*.pfx\r
+*.publishsettings\r
+node_modules/\r
+orleans.codegen.cs\r
+\r
+# RIA/Silverlight projects\r
+Generated_Code/\r
+\r
+# Backup & report files from converting an old project file\r
+# to a newer Visual Studio version. Backup files are not needed,\r
+# because we have git ;-)\r
+_UpgradeReport_Files/\r
+Backup*/\r
+UpgradeLog*.XML\r
+UpgradeLog*.htm\r
+\r
+# SQL Server files\r
+*.mdf\r
+*.ldf\r
+\r
+# Business Intelligence projects\r
+*.rdl.data\r
+*.bim.layout\r
+*.bim_*.settings\r
+\r
+# Microsoft Fakes\r
+FakesAssemblies/\r
+\r
+# GhostDoc plugin setting file\r
+*.GhostDoc.xml\r
+\r
+# Node.js Tools for Visual Studio\r
+.ntvs_analysis.dat\r
+\r
+# Visual Studio 6 build log\r
+*.plg\r
+\r
+# Visual Studio 6 workspace options file\r
+*.opt\r
+\r
+# Visual Studio LightSwitch build output\r
+**/*.HTMLClient/GeneratedArtifacts\r
+**/*.DesktopClient/GeneratedArtifacts\r
+**/*.DesktopClient/ModelManifest.xml\r
+**/*.Server/GeneratedArtifacts\r
+**/*.Server/ModelManifest.xml\r
+_Pvt_Extensions\r
+\r
+# Paket dependency manager\r
+.paket/paket.exe\r
+\r
+# FAKE - F# Make\r
+.fake/\r
+\r
+!config.h\r
+contrib/build/
\ No newline at end of file
--- /dev/null
+@echo on
+SETLOCAL EnableDelayedExpansion
+
+:: Build every contrib dependency project, then the opendht projects,
+:: stopping (and propagating the error code) at the first failure.
+set SRC=%~dp0
+
+set PATH=%PATH%;%ProgramFiles(x86)%\MSBuild\14.0\Bin\
+
+set MSBUILD_ARGS=/nologo /p:Configuration=Release /p:Platform=x64 /verbosity:normal /maxcpucount:%NUMBER_OF_PROCESSORS%
+
+set TOBUILD=( ^
+contrib\build\argon2\vs2015\Argon2Ref\Argon2Ref.vcxproj, ^
+contrib\build\gmp\SMP\libgmp.vcxproj, ^
+contrib\build\nettle\SMP\libnettle.vcxproj, ^
+contrib\build\nettle\SMP\libhogweed.vcxproj, ^
+contrib\build\libiconv\SMP\libiconv.vcxproj, ^
+contrib\build\zlib\SMP\libzlib.vcxproj, ^
+contrib\build\gnutls\SMP\libgnutls.vcxproj, ^
+contrib\build\msgpack-c\msgpack_vc8.vcxproj, ^
+opendht.vcxproj, ^
+dhtchat.vcxproj, ^
+dhtscanner.vcxproj, ^
+dhtnode.vcxproj ^
+)
+
+for %%I in %TOBUILD% do (
+ call :build "%SRC%%%I"
+ rem abort on the first failed project so CI reports the real error
+ if !ERRORLEVEL! neq 0 exit /B !ERRORLEVEL!
+)
+
+exit /B %ERRORLEVEL%
+
+:build
+echo "Building project: " %*
+msbuild %* %MSBUILD_ARGS%
+rem forward msbuild's exit code instead of unconditionally returning 0
+exit /B %ERRORLEVEL%
\ No newline at end of file
--- /dev/null
+--- a/vs2015/Argon2Ref/Argon2Ref.vcxproj\r
++++ b/vs2015/Argon2Ref/Argon2Ref.vcxproj\r
+@@ -59,7 +59,7 @@\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">\r
+- <ConfigurationType>Application</ConfigurationType>\r
++ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+@@ -117,8 +117,8 @@\r
+ <IncludePath>$(SolutionDir)include;$(IncludePath)</IncludePath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+- <OutDir>$(SolutionDir)vs2015\build\</OutDir>\r
+- <IntDir>$(SolutionDir)vs2015\build\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\..\lib\x64</OutDir>\r
++ <IntDir>$(ProjectDir)vs2015\build\$(ProjectName)\</IntDir>\r
+ <IncludePath>$(SolutionDir)include;$(IncludePath)</IncludePath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseStatic|x64'">\r
+@@ -179,6 +179,7 @@\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
++ <AdditionalIncludeDirectories>$(ProjectDir)..\..\include</AdditionalIncludeDirectories>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+@@ -223,4 +224,4 @@\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+-</Project>\r
++</Project>\r
+\ No newline at end of file\r
+-- \r
+2.10.2.windows.1\r
--- /dev/null
+:: Download (or copy from cache) a pinned argon2 snapshot, extract it
+:: into %BUILD%, and apply the UWP patch.
+set BUILD=%SRC%..\build
+
+set ARGON2_VERSION=1eea0104e7cb2a38c617cf90ffa46ce5db6aceda
+set ARGON2_URL=https://github.com/P-H-C/phc-winner-argon2/archive/%ARGON2_VERSION%.tar.gz
+
+rem quote all paths: %~dp0-derived dirs may contain spaces
+if not exist "%BUILD%" mkdir "%BUILD%"
+
+if %USE_CACHE%==1 (
+ copy "%CACHE_DIR%\%ARGON2_VERSION%.tar.gz" "%cd%"
+) else (
+ wget %ARGON2_URL%
+)
+
+7z -y x %ARGON2_VERSION%.tar.gz && 7z -y x %ARGON2_VERSION%.tar -o"%BUILD%"
+del %ARGON2_VERSION%.tar && del %ARGON2_VERSION%.tar.gz && del "%BUILD%\pax_global_header"
+rename "%BUILD%\phc-winner-argon2-%ARGON2_VERSION%" argon2
+
+cd "%BUILD%\argon2"
+
+rem %SRC% already ends with a backslash (it comes from %~dp0)
+git apply --reject --whitespace=fix "%SRC%argon2\argon2-uwp.patch"
+
+cd "%SRC%"
\ No newline at end of file
--- /dev/null
+@echo OFF
+SETLOCAL EnableDelayedExpansion
+
+:: Run each dependency's fetch_and_patch.bat (plus opendht's own) in order.
+set SRC=%~dp0
+
+rem default to downloading when no cache directory was configured
+if "%USE_CACHE%"=="" (
+ set USE_CACHE=0
+)
+
+set DEPENDENCIES=( ^
+argon2, ^
+gmp, ^
+gnutls, ^
+iconv, ^
+msgpack, ^
+nettle, ^
+opendht, ^
+zlib ^
+)
+
+for %%I in %DEPENDENCIES% do (
+ rem %SRC% ends with a backslash; quote in case the path contains spaces
+ call "%SRC%%%I\fetch_and_patch.bat"
+)
\ No newline at end of file
--- /dev/null
+:: Download (or copy from cache) a pinned ShiftMediaProject/gmp snapshot,
+:: extract it into %BUILD%, and apply the UWP patch.
+set BUILD=%SRC%..\build
+
+set GMP_VERSION=3c8f5a0ae0c2ac9ff0ea31b27f71b152979b556d
+set GMP_URL=https://github.com/ShiftMediaProject/gmp/archive/%GMP_VERSION%.tar.gz
+
+rem quote all paths: %~dp0-derived dirs may contain spaces
+if not exist "%BUILD%" mkdir "%BUILD%"
+
+if %USE_CACHE%==1 (
+ copy "%CACHE_DIR%\%GMP_VERSION%.tar.gz" "%cd%"
+) else (
+ wget %GMP_URL%
+)
+
+7z -y x %GMP_VERSION%.tar.gz && 7z -y x %GMP_VERSION%.tar -o"%BUILD%"
+del %GMP_VERSION%.tar && del %GMP_VERSION%.tar.gz && del "%BUILD%\pax_global_header"
+rename "%BUILD%\gmp-%GMP_VERSION%" gmp
+
+cd "%BUILD%\gmp"
+
+rem %SRC% already ends with a backslash (it comes from %~dp0)
+git apply --reject --whitespace=fix "%SRC%gmp\gmp-uwp.patch"
+
+cd "%SRC%"
\ No newline at end of file
--- /dev/null
+--- a/SMP/libgmp.vcxproj
++++ b/SMP/libgmp.vcxproj
+@@ -1248,6 +1248,7 @@
+ <ProjectGuid>{02B94302-23D6-43EF-8865-95CDE99D5DC2}</ProjectGuid>
+ <RootNamespace>libgmp</RootNamespace>
+ <ProjectName>libgmp</ProjectName>
++ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+@@ -1368,65 +1369,65 @@
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgmpd</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgmpd</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gmpd</TargetName>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gmpd</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgmp</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgmp</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgmp</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgmp</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gmp</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gmp</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+--- a/SMP/config.h
++++ b/SMP/config.h
+@@ -202,7 +202,7 @@
+ /* #undef HAVE_NATIVE_mpn_add_n_sub_n */
+ #define HAVE_NATIVE_mpn_add_nc 1
+ #if defined(__x86_64) || defined(_M_X64)
+-#define HAVE_NATIVE_mpn_addaddmul_1msb0 1
++#define HAVE_NATIVE_mpn_addaddmul_1msb0 0
+ #define HAVE_NATIVE_mpn_addlsh1_n 1
+ #define HAVE_NATIVE_mpn_addlsh2_n 1
+ #define HAVE_NATIVE_mpn_addlsh_n 1
+--
+2.8.1.windows.1
+
--- /dev/null
+:: Download (or copy from cache) a pinned ShiftMediaProject/gnutls snapshot,
+:: extract it into %BUILD%, and apply the local patches.
+set BUILD=%SRC%..\build
+
+set GNUTLS_VERSION=f2d0ade53ff644da55244aed79d05eca78d11a2f
+set GNUTLS_URL=https://github.com/ShiftMediaProject/gnutls/archive/%GNUTLS_VERSION%.tar.gz
+
+rem quote all paths: %~dp0-derived dirs may contain spaces
+if not exist "%BUILD%" mkdir "%BUILD%"
+
+if %USE_CACHE%==1 (
+ copy "%CACHE_DIR%\%GNUTLS_VERSION%.tar.gz" "%cd%"
+) else (
+ wget %GNUTLS_URL%
+)
+
+7z -y x %GNUTLS_VERSION%.tar.gz && 7z -y x %GNUTLS_VERSION%.tar -o"%BUILD%"
+del %GNUTLS_VERSION%.tar && del %GNUTLS_VERSION%.tar.gz && del "%BUILD%\pax_global_header"
+rename "%BUILD%\gnutls-%GNUTLS_VERSION%" gnutls
+
+cd "%BUILD%\gnutls"
+
+rem %SRC% already ends with a backslash (it comes from %~dp0)
+git apply --reject --whitespace=fix "%SRC%gnutls\gnutls-no-egd.patch"
+git apply --reject --whitespace=fix "%SRC%gnutls\read-file-limits.h.patch"
+git apply --reject --whitespace=fix "%SRC%gnutls\gnutls-uwp.patch"
+
+cd "%SRC%"
\ No newline at end of file
--- /dev/null
+ {a => b}/lib/nettle/Makefile.am | 2 +-
+ {a => b}/lib/nettle/Makefile.in | 11 +++++------
+ {a => b}/lib/nettle/rnd-common.c | 7 +++++--
+ 3 files changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/lib/nettle/Makefile.am b/lib/nettle/Makefile.am
+index e84ed1f..4767663 100644
+--- a/lib/nettle/Makefile.am
++++ b/lib/nettle/Makefile.am
+@@ -38,7 +38,7 @@ endif
+
+ noinst_LTLIBRARIES = libcrypto.la
+
+-libcrypto_la_SOURCES = pk.c mpi.c mac.c cipher.c init.c egd.c egd.h \
++libcrypto_la_SOURCES = pk.c mpi.c mac.c cipher.c init.c \
+ gnettle.h rnd-common.h rnd-common.c \
+ rnd.c
+
+diff --git a/lib/nettle/Makefile.in b/lib/nettle/Makefile.in
+index 88ae7cf..6ddd536 100644
+--- a/lib/nettle/Makefile.in
++++ b/lib/nettle/Makefile.in
+@@ -221,8 +221,8 @@ CONFIG_CLEAN_FILES =
+ CONFIG_CLEAN_VPATH_FILES =
+ LTLIBRARIES = $(noinst_LTLIBRARIES)
+ libcrypto_la_LIBADD =
+-am__libcrypto_la_SOURCES_DIST = pk.c mpi.c mac.c cipher.c init.c egd.c \
+- egd.h gnettle.h rnd-common.h rnd-common.c rnd.c rnd-fips.c \
++am__libcrypto_la_SOURCES_DIST = pk.c mpi.c mac.c cipher.c init.c \
++ gnettle.h rnd-common.h rnd-common.c rnd.c rnd-fips.c \
+ int/drbg-aes-self-test.c int/dsa-fips.h \
+ int/dsa-keygen-fips186.c int/dsa-validate.c \
+ int/provable-prime.c int/drbg-aes.c int/drbg-aes.h \
+@@ -233,7 +233,7 @@ am__dirstamp = $(am__leading_dot)dirstamp
+ @ENABLE_FIPS140_TRUE@ int/dsa-keygen-fips186.lo \
+ @ENABLE_FIPS140_TRUE@ int/dsa-validate.lo int/provable-prime.lo \
+ @ENABLE_FIPS140_TRUE@ int/drbg-aes.lo int/rsa-keygen-fips186.lo
+-am_libcrypto_la_OBJECTS = pk.lo mpi.lo mac.lo cipher.lo init.lo egd.lo \
++am_libcrypto_la_OBJECTS = pk.lo mpi.lo mac.lo cipher.lo init.lo \
+ rnd-common.lo rnd.lo $(am__objects_1)
+ libcrypto_la_OBJECTS = $(am_libcrypto_la_OBJECTS)
+ AM_V_lt = $(am__v_lt_@AM_V@)
+@@ -1279,8 +1279,8 @@ AM_CPPFLAGS = -I$(srcdir)/int -I$(srcdir)/../../gl \
+ -I$(builddir)/../includes -I$(builddir)/../../gl \
+ -I$(srcdir)/.. $(am__append_1)
+ noinst_LTLIBRARIES = libcrypto.la
+-libcrypto_la_SOURCES = pk.c mpi.c mac.c cipher.c init.c egd.c egd.h \
+- gnettle.h rnd-common.h rnd-common.c rnd.c $(am__append_2)
++libcrypto_la_SOURCES = pk.c mpi.c mac.c cipher.c init.c gnettle.h \
++ rnd-common.h rnd-common.c rnd.c $(am__append_2)
+ all: all-am
+
+ .SUFFIXES:
+@@ -1355,7 +1355,6 @@ distclean-compile:
+ -rm -f *.tab.c
+
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cipher.Plo@am__quote@
+-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/egd.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/init.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mac.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpi.Plo@am__quote@
+diff --git a/lib/nettle/rnd-common.c b/lib/nettle/rnd-common.c
+index c69ff3b..80b5f32 100644
+--- a/lib/nettle/rnd-common.c
++++ b/lib/nettle/rnd-common.c
+@@ -139,7 +139,7 @@ void _rnd_system_entropy_deinit(void)
+ #include <sys/time.h>
+ #include <fcntl.h>
+ #include <locks.h>
+-#include "egd.h"
++//#include "egd.h"
+
+ static int _gnutls_urandom_fd = -1;
+ static mode_t _gnutls_urandom_fd_mode = 0;
+@@ -208,6 +208,7 @@ static int _rnd_get_system_entropy_urandom(void* _rnd, size_t size)
+ return 0;
+ }
+
++#if 0
+ static
+ int _rnd_get_system_entropy_egd(void* _rnd, size_t size)
+ {
+@@ -232,6 +233,7 @@ int _rnd_get_system_entropy_egd(void* _rnd, size_t size)
+
+ return 0;
+ }
++#endif
+
+ int _rnd_system_entropy_check(void)
+ {
+@@ -268,6 +270,7 @@ int _rnd_system_entropy_init(void)
+
+ return 0;
+ fallback:
++#if 0
+ _gnutls_urandom_fd = _rndegd_connect_socket();
+ if (_gnutls_urandom_fd < 0) {
+ _gnutls_debug_log("Cannot open egd socket!\n");
+@@ -281,7 +284,7 @@ fallback:
+ }
+
+ _rnd_get_system_entropy = _rnd_get_system_entropy_egd;
+-
++#endif
+ return 0;
+ }
+
--- /dev/null
+--- a/SMP/libgnutls.vcxproj
++++ b/SMP/libgnutls.vcxproj
+@@ -743,91 +744,91 @@
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgnutlsd</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgnutlsd</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutlsd</TargetName>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLLStaticDeps|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutlsd</TargetName>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutlsd</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLLStaticDeps|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutlsd</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgnutls</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgnutls</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgnutls</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libgnutls</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutls</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLLStaticDeps|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutls</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutls</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLLStaticDeps|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>gnutls</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+@@ -1834,7 +1834,7 @@ del /f /q $(OutDir)\licenses\gnutls.txt
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FloatingPointModel>Fast</FloatingPointModel>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+- <PreprocessorDefinitions>HAVE_CONFIG_H;inline=__inline;ASN1_BUILDING;ASN1_STATIC;LOCALEDIR=".";__func__=__FUNCTION__;ASM_X86;ASM_X86_64;_WIN32_WINNT=0x0600;NDEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
++ <PreprocessorDefinitions>RING_UWP;HAVE_WIN32_LOCKS;HAVE_CONFIG_H;inline=__inline;ASN1_BUILDING;ASN1_STATIC;LOCALEDIR=".";__func__=__FUNCTION__;ASM_X86_64;_WIN32_WINNT=0x0600;NDEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>.\;.\lib;..\lib;..\lib\accelerated;..\lib\auth;..\lib\ext;..\lib\extras;..\lib\minitasn1;..\lib\nettle;..\lib\nettle\int;..\lib\openpgp;..\lib\opencdk;..\lib\x509;..\lib\includes;..\gl;$(OutDir)\include;..\lib\accelerated\x86;$(ProjectDir)\..\..\prebuilt\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <DisableSpecificWarnings>4996;4244;4018;4146;4267;4028;4101;4020;4047;4024;4005;4311;4312;4334;4116;4090;%(DisableSpecificWarnings)</DisableSpecificWarnings>\r
+@@ -1850,7 +1850,7 @@ del /f /q $(OutDir)\licenses\gnutls.txt
+ <OutputFile>$(OutDir)\lib\x64\$(TargetName)$(TargetExt)</OutputFile>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ <SubSystem>Windows</SubSystem>\r
+- <AdditionalDependencies>Crypt32.lib;Ws2_32.lib;libzlib.lib;libiconv.lib;libhogweed.lib;libgmp.lib;libnettle.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
++ <AdditionalDependencies>bcrypt.lib;Ws2_32.lib;libzlib.lib;libiconv.lib;libhogweed.lib;libgmp.lib;libnettle.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(OutDir)\lib\x64\;$(ProjectDir)\..\..\prebuilt\lib\x64\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/IGNORE:4006,4221,4042 %(AdditionalOptions)</AdditionalOptions>\r
+ </Lib>\r
+--- a/lib/nettle/rnd-common.c
++++ b/lib/nettle/rnd-common.c
+@@ -45,6 +45,7 @@
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <unistd.h>
++#include <bcrypt.h>
+
+ /* gnulib wants to claim strerror even if it cannot provide it. WTF */
+ #undef strerror
+@@ -88,16 +89,25 @@ void _rnd_get_event(struct event_st *e)
+ #include <wincrypt.h>
+
+ static HCRYPTPROV device_fd = 0;
++static BCRYPT_ALG_HANDLE bdevice_fd = 0;
+
+ static
+ int _rnd_get_system_entropy_win32(void* rnd, size_t size)
+ {
++#ifndef RING_UWP
+ if (!CryptGenRandom(device_fd, (DWORD) size, rnd)) {
+ _gnutls_debug_log("Error in CryptGenRandom: %d\n",
+ (int)GetLastError());
+ return GNUTLS_E_RANDOM_DEVICE_ERROR;
+ }
+-
++#else
++ NTSTATUS ret = BCryptGenRandom(bdevice_fd, rnd, (ULONG) size, 0);
++ if (!(BCRYPT_SUCCESS(ret))) {
++ _gnutls_debug_log("Error in BCryptGenRandom: %d\n",
++ (int)GetLastError());
++ return GNUTLS_E_RANDOM_DEVICE_ERROR;
++ }
++#endif
+ return 0;
+ }
+
+@@ -111,7 +121,7 @@ int _rnd_system_entropy_check(void)
+ int _rnd_system_entropy_init(void)
+ {
+ int old;
+-
++#ifndef RING_UWP
+ if (!CryptAcquireContext
+ (&device_fd, NULL, NULL, PROV_RSA_FULL,
+ CRYPT_SILENT | CRYPT_VERIFYCONTEXT)) {
+@@ -119,13 +129,26 @@ int _rnd_system_entropy_init(void)
+ ("error in CryptAcquireContext!\n");
+ return GNUTLS_E_RANDOM_DEVICE_ERROR;
+ }
+-
++#else
++ NTSTATUS ret = BCryptOpenAlgorithmProvider(&bdevice_fd,
++ BCRYPT_RNG_ALGORITHM, MS_PRIMITIVE_PROVIDER,
++ 0);
++ if (!(BCRYPT_SUCCESS(ret))) {
++ _gnutls_debug_log
++ ("error in BCryptOpenAlgorithmProvider!\n");
++ return GNUTLS_E_RANDOM_DEVICE_ERROR;
++ }
++#endif
+ return 0;
+ }
+
+ void _rnd_system_entropy_deinit(void)
+ {
++#ifndef RING_UWP
+ CryptReleaseContext(device_fd, 0);
++#else
++ BCryptCloseAlgorithmProvider(bdevice_fd, 0);
++#endif
+ }
+
+ #else /* POSIX */
+--- a/lib/opencdk/misc.c
++++ b/lib/opencdk/misc.c
+@@ -34,7 +34,6 @@
+ #include <gnutls_int.h>
+ #include <gnutls_str.h>
+
+-
+ u32 _cdk_buftou32(const byte * buf)
+ {
+ u32 u;
+@@ -149,6 +148,13 @@ int _cdk_check_args(int overwrite, const char *in, const char *out)
+ }
+
+ #ifdef _WIN32
++#if (_WIN32_WINNT >= 0x0600)
++FILE *_cdk_tmpfile(void)
++{
++ return NULL;
++}
++#else
++
+ #include <io.h>
+ #include <fcntl.h>
+
+@@ -183,6 +189,7 @@ FILE *_cdk_tmpfile(void)
+ _close(fd);
+ return NULL;
+ }
++#endif /* _WIN32_WINNT >= 0x0600 */
+ #else
+ FILE *_cdk_tmpfile(void)
+ {
+--- a/lib/system-keys-win.c
++++ b/lib/system-keys-win.c
+@@ -20,9 +20,10 @@
+
+ // Before including any Windows header we need to set _WIN32_WINNT to Vista
+ // (or higher) so that the NCRYPT stuff can be used.
+-#if _WIN32_WINNT < 0x600
++#define _WIN32_WINNT 0x0A00
++#if _WIN32_WINNT < 0x0600
+ #undef _WIN32_WINNT
+-#define _WIN32_WINNT 0x600
++#define _WIN32_WINNT 0x0600
+ #endif
+
+
+@@ -1075,7 +1076,11 @@ int _gnutls_system_key_init(void)
+ int ret;
+
+ #ifdef DYN_NCRYPT
+- ncrypt_lib = LoadLibraryA("ncrypt.dll");
++#if (_WIN32_WINNT < 0x0A00)
++ ncrypt_lib = LoadLibrary("ncrypt.dll");
++#else
++ ncrypt_lib = LoadPackagedLibrary("ncrypt.dll", 0);
++#endif
+ if (ncrypt_lib == NULL) {
+ return gnutls_assert_val(GNUTLS_E_CRYPTO_INIT_FAILED);
+ }
+--- a/lib/system-keys-win.c
++++ b/lib/system-keys-win.c
+@@ -396,7 +396,11 @@ _gnutls_privkey_import_system_url(gnutls_privkey_t pkey,
+ blob.cbData = id_size;
+ blob.pbData = id;
+
+- store = CertOpenSystemStore(0, "MY");
++ store = CertOpenStore( CERT_STORE_PROV_SYSTEM_A,
++ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
++ 0,
++ CERT_SYSTEM_STORE_CURRENT_USER,
++ (const void*)"MY");
+ if (store == NULL) {
+ gnutls_assert();
+ ret = GNUTLS_E_FILE_ERROR;
+@@ -535,7 +539,11 @@ _gnutls_x509_crt_import_system_url(gnutls_x509_crt_t crt, const char *url)
+ blob.cbData = id_size;
+ blob.pbData = id;
+
+- store = CertOpenSystemStore(0, "MY");
++ store = CertOpenStore( CERT_STORE_PROV_SYSTEM_A,
++ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
++ 0,
++ CERT_SYSTEM_STORE_CURRENT_USER,
++ (const void*)"MY");
+ if (store == NULL) {
+ gnutls_assert();
+ ret = GNUTLS_E_FILE_ERROR;
+@@ -776,7 +784,11 @@ gnutls_system_key_iter_get_info(gnutls_system_key_iter_t *iter,
+ if (*iter == NULL)
+ return gnutls_assert_val(GNUTLS_E_MEMORY_ERROR);
+
+- (*iter)->store = CertOpenSystemStore(0, "MY");
++ (*iter)->store = CertOpenStore( CERT_STORE_PROV_SYSTEM_A,
++ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
++ 0,
++ CERT_SYSTEM_STORE_CURRENT_USER,
++ (const void*)"MY");
+ if ((*iter)->store == NULL) {
+ gnutls_free(*iter);
+ *iter = NULL;
+@@ -841,7 +853,11 @@ int gnutls_system_key_delete(const char *cert_url, const char *key_url)
+ blob.cbData = id_size;
+ blob.pbData = id;
+
+- store = CertOpenSystemStore(0, "MY");
++ store = CertOpenStore( CERT_STORE_PROV_SYSTEM_A,
++ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
++ 0,
++ CERT_SYSTEM_STORE_CURRENT_USER,
++ (const void*)"MY");
+ if (store != NULL) {
+ do {
+ cert = CertFindCertificateInStore(store,
+--- a/lib/system.c
++++ b/lib/system.c
+@@ -446,9 +446,17 @@ int add_system_trust(gnutls_x509_trust_list_t list, unsigned int tl_flags,
+ gnutls_datum_t data;
+
+ if (i == 0)
+- store = CertOpenSystemStore(0, "ROOT");
++ store = CertOpenStore( CERT_STORE_PROV_SYSTEM_A,
++ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
++ 0,
++ CERT_SYSTEM_STORE_CURRENT_USER,
++ (const void*)"ROOT");
+ else
+- store = CertOpenSystemStore(0, "CA");
++ store = CertOpenStore( CERT_STORE_PROV_SYSTEM_A,
++ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
++ 0,
++ CERT_SYSTEM_STORE_CURRENT_USER,
++ (const void*)"CA");
+
+ if (store == NULL)
+ return GNUTLS_E_FILE_ERROR;
+--- a/lib/accelerated/x86/x86-common.c
++++ b/lib/accelerated/x86/x86-common.c
+@@ -652,6 +652,7 @@ void register_x86_intel_crypto(unsigned capabilities)
+
+ void register_x86_crypto(void)
+ {
++#ifndef RING_UWP
+ unsigned capabilities = 0;
+ char *p;
+ p = secure_getenv("GNUTLS_CPUID_OVERRIDE");
+@@ -663,5 +664,7 @@ void register_x86_crypto(void)
+ #ifdef ENABLE_PADLOCK
+ register_x86_padlock_crypto(capabilities);
+ #endif
++#endif
++ register_x86_intel_crypto(0);
+ }
+
+--- a/lib/gnutls_global.c
++++ b/lib/gnutls_global.c
+@@ -240,7 +240,9 @@ int gnutls_global_init(void)
+
+ _gnutls_switch_lib_state(LIB_STATE_INIT);
+
++#ifndef RING_UWP
+ e = secure_getenv("GNUTLS_DEBUG_LEVEL");
++#endif
+ if (e != NULL) {
+ level = atoi(e);
+ gnutls_global_set_log_level(level);
+@@ -473,8 +475,10 @@ const char *e;
+ if (_gnutls_global_init_skip() != 0)
+ return;
+
++#ifndef RING_UWP
+ e = secure_getenv("GNUTLS_NO_EXPLICIT_INIT");
+- if (e != NULL) {
++#endif
++ if (e != NULL) {
+ ret = atoi(e);
+ if (ret == 1)
+ return;
+@@ -494,7 +498,9 @@ static void _DESTRUCTOR lib_deinit(void)
+ if (_gnutls_global_init_skip() != 0)
+ return;
+
++#ifndef RING_UWP
+ e = secure_getenv("GNUTLS_NO_EXPLICIT_INIT");
++#endif
+ if (e != NULL) {
+ int ret = atoi(e);
+ if (ret == 1)
+--- a/lib/gnutls_kx.c
++++ b/lib/gnutls_kx.c
+@@ -105,7 +105,9 @@ static void write_nss_key_log(gnutls_session_t session, const gnutls_datum_t *pr
+
+ if (!checked_env) {
+ checked_env = 1;
++#ifndef RING_UWP
+ keylogfile = secure_getenv("SSLKEYLOGFILE");
++#endif
+ }
+
+ if (keylogfile == NULL)
+--- a/lib/system.c
++++ b/lib/system.c
+@@ -340,7 +340,11 @@ void gnutls_system_global_deinit(void)
+ */
+ int _gnutls_find_config_path(char *path, size_t max_size)
+ {
+- const char *home_dir = secure_getenv("HOME");
++#ifndef RING_UWP
++ const char *home_dir = secure_getenv("HOME");
++#else
++ const char *home_dir = NULL;
++#endif
+
+ if (home_dir != NULL && home_dir[0] != 0) {
+ snprintf(path, max_size, "%s/" CONFIG_PATH, home_dir);
+@@ -349,8 +353,13 @@ int _gnutls_find_config_path(char *path, size_t max_size)
+
+ #ifdef _WIN32
+ if (home_dir == NULL || home_dir[0] == '\0') {
++#ifndef RING_UWP
+ const char *home_drive = getenv("HOMEDRIVE");
+ const char *home_path = getenv("HOMEPATH");
++#else
++ const char *home_drive = NULL;
++ const char *home_path = NULL;
++#endif
+
+ if (home_drive != NULL && home_path != NULL) {
+ snprintf(path, max_size, "%s%s\\" CONFIG_PATH, home_drive, home_path);
+2.8.1.windows.1
+
--- /dev/null
+--- gnutls/gl/read-file.c.orig 2012-03-06 20:59:29.600593329 -0500
++++ gnutls/gl/read-file.c 2012-03-06 20:59:44.568593328 -0500
+@@ -35,6 +35,9 @@
+ /* Get errno. */
+ #include <errno.h>
+
++/* Get SIZE_MAX */
++#include <limits.h>
++
+ /* Read a STREAM and return a newly allocated string with the content,
+ and set *LENGTH to the length of the string. The string is
+ zero-terminated, but the terminating zero byte is not counted in
--- /dev/null
+set BUILD=%SRC%..\build\r
+\r
+set ICONV_VERSION=65ab92f7a1699ecc39e37fb81f66e5a42aaa35c4\r
+set ICONV_URL=https://github.com/ShiftMediaProject/libiconv/archive/%ICONV_VERSION%.tar.gz\r
+\r
+mkdir %BUILD%\r
+\r
+if %USE_CACHE%==1 (\r
+ copy %CACHE_DIR%\%ICONV_VERSION%.tar.gz %cd%\r
+) else (\r
+ wget %ICONV_URL%\r
+)\r
+\r
+7z -y x %ICONV_VERSION%.tar.gz && 7z -y x %ICONV_VERSION%.tar -o%BUILD%\r
+del %ICONV_VERSION%.tar && del %ICONV_VERSION%.tar.gz && del %BUILD%\pax_global_header\r
+rename %BUILD%\libiconv-%ICONV_VERSION% libiconv\r
+\r
+cd %BUILD%\libiconv\r
+\r
+git apply --reject --whitespace=fix %SRC%\iconv\libiconv-uwp.patch\r
+\r
+cd %SRC%
\ No newline at end of file
--- /dev/null
+--- a/SMP/libiconv.vcxproj
++++ b/SMP/libiconv.vcxproj
+@@ -45,6 +45,7 @@
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}</ProjectGuid>
+ <RootNamespace>libiconv</RootNamespace>
++ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+@@ -164,65 +165,65 @@
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libiconvd</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libiconvd</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>iconvd</TargetName>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>iconvd</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libiconv</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libiconv</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libiconv</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libiconv</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>iconv</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|x64'">
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>iconv</TargetName>
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+--
+2.8.1.windows.1
+
--- /dev/null
+set BUILD=%SRC%..\build\r
+\r
+set MSGPACK_VERSION=1df97bc37b363a340c5ad06c5cbcc53310aaff80\r
+set MSGPACK_URL=https://github.com/msgpack/msgpack-c/archive/%MSGPACK_VERSION%.tar.gz\r
+\r
+mkdir %BUILD%\r
+\r
+if %USE_CACHE%==1 (\r
+ copy %CACHE_DIR%\%MSGPACK_VERSION%.tar.gz %cd%\r
+) else (\r
+ wget %MSGPACK_URL%\r
+)\r
+\r
+7z -y x %MSGPACK_VERSION%.tar.gz && 7z -y x %MSGPACK_VERSION%.tar -o%BUILD%\r
+del %MSGPACK_VERSION%.tar && del %MSGPACK_VERSION%.tar.gz && del %BUILD%\pax_global_header\r
+rename %BUILD%\msgpack-c-%MSGPACK_VERSION% msgpack-c\r
+\r
+cd %BUILD%\msgpack-c\r
+\r
+git apply --reject --whitespace=fix %SRC%\msgpack\msgpack-uwp.patch\r
+\r
+cd %SRC%
\ No newline at end of file
--- /dev/null
+--- /dev/null
++++ b/msgpack_vc8.vcxproj
+@@ -0,0 +1,214 @@
++<?xml version="1.0" encoding="utf-8"?>
++<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
++ <ItemGroup Label="ProjectConfigurations">
++ <ProjectConfiguration Include="Debug|Win32">
++ <Configuration>Debug</Configuration>
++ <Platform>Win32</Platform>
++ </ProjectConfiguration>
++ <ProjectConfiguration Include="Debug|x64">
++ <Configuration>Debug</Configuration>
++ <Platform>x64</Platform>
++ </ProjectConfiguration>
++ <ProjectConfiguration Include="Release|Win32">
++ <Configuration>Release</Configuration>
++ <Platform>Win32</Platform>
++ </ProjectConfiguration>
++ <ProjectConfiguration Include="Release|x64">
++ <Configuration>Release</Configuration>
++ <Platform>x64</Platform>
++ </ProjectConfiguration>
++ </ItemGroup>
++ <PropertyGroup Label="Globals">
++ <ProjectName>MessagePack</ProjectName>
++ <ProjectGuid>{122A2EA4-B283-4241-9655-786DE78283B2}</ProjectGuid>
++ <RootNamespace>MessagePack</RootNamespace>
++ <Keyword>Win32Proj</Keyword>
++ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
++ </PropertyGroup>
++ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
++ <ConfigurationType>StaticLibrary</ConfigurationType>
++ <PlatformToolset>v140</PlatformToolset>
++ <CharacterSet>Unicode</CharacterSet>
++ <WholeProgramOptimization>false</WholeProgramOptimization>
++ </PropertyGroup>
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
++ <ConfigurationType>StaticLibrary</ConfigurationType>
++ <PlatformToolset>v140</PlatformToolset>
++ <CharacterSet>Unicode</CharacterSet>
++ <WholeProgramOptimization>false</WholeProgramOptimization>
++ </PropertyGroup>
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
++ <ConfigurationType>StaticLibrary</ConfigurationType>
++ <PlatformToolset>v140</PlatformToolset>
++ <CharacterSet>Unicode</CharacterSet>
++ </PropertyGroup>
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
++ <ConfigurationType>StaticLibrary</ConfigurationType>
++ <PlatformToolset>v140</PlatformToolset>
++ <CharacterSet>Unicode</CharacterSet>
++ </PropertyGroup>
++ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
++ <ImportGroup Label="ExtensionSettings">
++ </ImportGroup>
++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
++ </ImportGroup>
++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
++ </ImportGroup>
++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
++ </ImportGroup>
++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
++ </ImportGroup>
++ <PropertyGroup Label="UserMacros" />
++ <PropertyGroup>
++ <_ProjectFileVersion>14.0.25123.0</_ProjectFileVersion>
++ </PropertyGroup>
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
++ <OutDir>$(ProjectDir)..\</OutDir>
++ <IntDir>$(Configuration)\</IntDir>
++ <TargetName>msgpackd</TargetName>
++ </PropertyGroup>
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
++ <TargetName>msgpackd</TargetName>
++ <OutDir>$(ProjectDir)..\</OutDir>
++ <IntDir>$(Configuration)\</IntDir>
++ </PropertyGroup>
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
++ <OutDir>$(ProjectDir)..\</OutDir>
++ <IntDir>$(Configuration)\</IntDir>
++ <TargetName>msgpack</TargetName>
++ </PropertyGroup>
++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
++ <TargetName>msgpack</TargetName>
++ <OutDir>$(ProjectDir)..\</OutDir>
++ <IntDir>$(Configuration)\</IntDir>
++ </PropertyGroup>
++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
++ <ClCompile>
++ <Optimization>Disabled</Optimization>
++ <AdditionalIncludeDirectories>.;include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
++ <PreprocessorDefinitions>WIN32_NATIVE;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
++ <MinimalRebuild>true</MinimalRebuild>
++ <BasicRuntimeChecks>StackFrameRuntimeCheck</BasicRuntimeChecks>
++ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
++ <PrecompiledHeader />
++ <WarningLevel>Level3</WarningLevel>
++ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
++ </ClCompile>
++ <Lib>
++ <OutputFile>$(OutDir)\lib\x86\$(TargetName)$(TargetExt)</OutputFile>
++ </Lib>
++ <PostBuildEvent>
++ <Command>mkdir $(OutDir)\include
++
++mkdir $(OutDir)\include\msgpack
++
++copy include\*.h $(OutDir)\include\
++
++copy include\*.hpp $(OutDir)\include\
++xcopy /S /Y include\msgpack\*.h $(OutDir)\include\msgpack
++xcopy /S /Y include\msgpack\*.hpp $(OutDir)\include\msgpack
++</Command>
++ </PostBuildEvent>
++ </ItemDefinitionGroup>
++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
++ <ClCompile>
++ <Optimization>Disabled</Optimization>
++ <AdditionalIncludeDirectories>.;include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
++ <PreprocessorDefinitions>WIN32_NATIVE;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
++ <BasicRuntimeChecks>StackFrameRuntimeCheck</BasicRuntimeChecks>
++ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
++ <PrecompiledHeader>
++ </PrecompiledHeader>
++ <WarningLevel>Level3</WarningLevel>
++ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
++ </ClCompile>
++ <Lib>
++ <OutputFile>$(OutDir)\lib\x64\$(TargetName)$(TargetExt)</OutputFile>
++ </Lib>
++ <PostBuildEvent>
++ <Command>mkdir $(OutDir)\include
++
++mkdir $(OutDir)\include\msgpack
++
++copy include\*.h $(OutDir)\include\
++
++copy include\*.hpp $(OutDir)\include\
++xcopy /S /Y include\msgpack\*.h $(OutDir)\include\msgpack
++xcopy /S /Y include\msgpack\*.hpp $(OutDir)\include\msgpack
++</Command>
++ </PostBuildEvent>
++ </ItemDefinitionGroup>
++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
++ <ClCompile>
++ <AdditionalIncludeDirectories>.;include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
++ <PreprocessorDefinitions>WIN32_NATIVE;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
++ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
++ <PrecompiledHeader />
++ <WarningLevel>Level3</WarningLevel>
++ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
++ <ProgramDataBaseFileName>$(OutDir)\lib\x86\$(TargetName).pdb</ProgramDataBaseFileName>
++ </ClCompile>
++ <Lib>
++ <OutputFile>$(OutDir)\lib\x86\$(TargetName)$(TargetExt)</OutputFile>
++ <LinkTimeCodeGeneration />
++ </Lib>
++ <PostBuildEvent>
++ <Command>mkdir $(OutDir)\include
++
++mkdir $(OutDir)\include\msgpack
++
++copy include\*.h $(OutDir)\include\
++
++copy include\*.hpp $(OutDir)\include\
++xcopy /S /Y include\msgpack\*.h $(OutDir)\include\msgpack
++xcopy /S /Y include\msgpack\*.hpp $(OutDir)\include\msgpack
++</Command>
++ </PostBuildEvent>
++ </ItemDefinitionGroup>
++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
++ <ClCompile>
++ <AdditionalIncludeDirectories>.;include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
++      <PreprocessorDefinitions>WIN32_NATIVE;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
++ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
++ <PrecompiledHeader>
++ </PrecompiledHeader>
++ <WarningLevel>Level3</WarningLevel>
++ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
++ <ProgramDataBaseFileName>$(OutDir)\lib\x64\$(TargetName).pdb</ProgramDataBaseFileName>
++ </ClCompile>
++ <Lib>
++ <OutputFile>$(OutDir)\lib\x64\$(TargetName)$(TargetExt)</OutputFile>
++ <LinkTimeCodeGeneration />
++ </Lib>
++ <PostBuildEvent>
++ <Command>mkdir $(OutDir)\include
++
++mkdir $(OutDir)\include\msgpack
++
++copy include\*.h $(OutDir)\include\
++
++copy include\*.hpp $(OutDir)\include\
++xcopy /S /Y include\msgpack\*.h $(OutDir)\include\msgpack
++xcopy /S /Y include\msgpack\*.hpp $(OutDir)\include\msgpack
++</Command>
++ </PostBuildEvent>
++ </ItemDefinitionGroup>
++ <ItemGroup>
++ <ClCompile Include="src\objectc.c" />
++ <ClCompile Include="src\unpack.c" />
++ <ClCompile Include="src\version.c" />
++ <ClCompile Include="src\vrefbuffer.c" />
++ <ClCompile Include="src\zone.c" />
++ </ItemGroup>
++ <ItemGroup>
++ <ClInclude Include="include\msgpack.hpp" />
++ </ItemGroup>
++ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
++ <ImportGroup Label="ExtensionTargets">
++ </ImportGroup>
++</Project>
+\ No newline at end of file
+--
+2.8.1.windows.1
+
--- /dev/null
+set BUILD=%SRC%..\build\r
+\r
+set NETTLE_VERSION=4e0b2723b76d4163fa37b2b456d41534154ec97c\r
+set NETTLE_URL=https://github.com/ShiftMediaProject/nettle/archive/%NETTLE_VERSION%.tar.gz\r
+\r
+mkdir %BUILD%\r
+\r
+if %USE_CACHE%==1 (\r
+ copy %CACHE_DIR%\%NETTLE_VERSION%.tar.gz %cd%\r
+) else (\r
+ wget %NETTLE_URL%\r
+)\r
+\r
+7z -y x %NETTLE_VERSION%.tar.gz && 7z -y x %NETTLE_VERSION%.tar -o%BUILD%\r
+del %NETTLE_VERSION%.tar && del %NETTLE_VERSION%.tar.gz && del %BUILD%\pax_global_header\r
+rename %BUILD%\nettle-%NETTLE_VERSION% nettle\r
+\r
+cd %BUILD%\nettle\r
+\r
+git apply --reject --whitespace=fix %SRC%\nettle\nettle-uwp.patch\r
+\r
+cd %SRC%
\ No newline at end of file
--- /dev/null
+--- a/SMP/libhogweed.vcxproj\r
++++ b/SMP/libhogweed.vcxproj\r
+@@ -242,6 +242,7 @@\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{200F3D3C-8B84-46D8-953A-6C0EBD283B5C}</ProjectGuid>\r
+ <RootNamespace>libhogweed</RootNamespace>\r
++ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+@@ -405,91 +406,91 @@\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libhogweedd</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libhogweedd</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweedd</TargetName>\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLLStaticDeps|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweedd</TargetName>\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweedd</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLLStaticDeps|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweedd</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libhogweed</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libhogweed</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libhogweed</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libhogweed</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweed</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLLStaticDeps|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweed</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweed</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLLStaticDeps|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>hogweed</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+--- a/SMP/libnettle.vcxproj\r
++++ b/SMP/libnettle.vcxproj\r
+@@ -278,6 +278,7 @@\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{070FEF2B-0C3F-4F33-9D3C-53C7330BF518}</ProjectGuid>\r
+ <RootNamespace>libnettle</RootNamespace>\r
++ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+@@ -397,65 +398,65 @@\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libnettled</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libnettled</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>nettled</TargetName>\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>nettled</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libnettle</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libnettle</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libnettle</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>libnettle</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|Win32'">\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>nettle</TargetName>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|x64'">\r
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>\r
+ <TargetName>nettle</TargetName>\r
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>\r
++ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+-- \r
+2.8.1.windows.1\r
--- /dev/null
+set BUILD=%SRC%..\build\r
+\r
+set ZLIB_VERSION=3a062eb61d0c3d4aa30851cd1a6597b977b56597\r
+set ZLIB_URL=https://github.com/ShiftMediaProject/zlib/archive/%ZLIB_VERSION%.tar.gz\r
+\r
+mkdir %BUILD%\r
+\r
+if %USE_CACHE%==1 (\r
+ copy %CACHE_DIR%\%ZLIB_VERSION%.tar.gz %cd%\r
+) else (\r
+ wget %ZLIB_URL%\r
+)\r
+\r
+7z -y x %ZLIB_VERSION%.tar.gz && 7z -y x %ZLIB_VERSION%.tar -o%BUILD%\r
+del %ZLIB_VERSION%.tar && del %ZLIB_VERSION%.tar.gz && del %BUILD%\pax_global_header\r
+rename %BUILD%\zlib-%ZLIB_VERSION% zlib\r
+\r
+cd %BUILD%\zlib\r
+\r
+git apply --reject --whitespace=fix %SRC%\zlib\zlib-uwp.patch\r
+\r
+cd %SRC%
\ No newline at end of file
--- /dev/null
+--- a/SMP/libzlib.vcxproj
++++ b/SMP/libzlib.vcxproj
+@@ -45,6 +45,7 @@
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}</ProjectGuid>
+ <RootNamespace>libzlib</RootNamespace>
++ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+@@ -165,63 +166,63 @@
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libzlibd</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libzlib</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libzlib</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>zlib</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|Win32'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>zlibd</TargetName>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libzlibd</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libzlib</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseLTO|x64'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>libzlib</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='ReleaseDLL|x64'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>zlib</TargetName>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='DebugDLL|x64'">
+- <OutDir>$(ProjectDir)..\..\..\msvc\</OutDir>
+- <IntDir>$(SolutionDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
++ <OutDir>$(ProjectDir)..\..\</OutDir>
++ <IntDir>$(ProjectDir)obj\$(Configuration)\$(Platform)\$(ProjectName)\</IntDir>
+ <TargetName>zlibd</TargetName>
+ <LinkIncremental>false</LinkIncremental>
+ <CustomBuildAfterTargets>Clean</CustomBuildAfterTargets>
+--
+2.8.1.windows.1
+
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug|Win32">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|Win32">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug|x64">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|x64">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{8BE7F14D-B227-4D54-9105-7E5473F2D0BA}</ProjectGuid>\r
+ <RootNamespace>dhtchat</RootNamespace>\r
+ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="Shared">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ <OutDir>$(ProjectDir)..\..\bin\x64</OutDir>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+      <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x86</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <!-- Release Win32 (x86): MaxSpeed with COMDAT folding and reference elimination. -->\r
+ <!-- NOTE(review): this x86 configuration links from $(ProjectDir)..\..\lib\x64 - confirm 32-bit import libraries actually live there, otherwise the x86 link step will fail. -->\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <!-- Release x64: unlike the other configurations, this one pulls headers/libs from the contrib\build tree and links Argon2Ref.lib instead of the blake.lib/argon.lib pair. -->\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;$(ProjectDir)contrib\build\include;$(ProjectDir)contrib\build\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>crypt32.lib;Argon2Ref.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)contrib\build\lib\x64;$(ProjectDir)..\..\lib\x64;$(ProjectDir)vs2015\build</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <!-- Sources: the dhtchat tool entry point plus a local getopt implementation for Windows. -->\r
+ <ItemGroup>\r
+ <ClCompile Include="..\tools\dhtchat.cpp" />\r
+ <ClCompile Include="wingetopt.c" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\tools\tools_common.h" />\r
+ <ClInclude Include="wingetopt.h" />\r
+ </ItemGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<!-- Visual Studio 2015 (v140) project for the dhtnode command-line tool (..\tools\dhtnode.cpp); new file added by this patch. -->\r
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug|Win32">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|Win32">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug|x64">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|x64">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{BF92AECF-AA1D-4B05-9D00-0247E92A24B5}</ProjectGuid>\r
+ <RootNamespace>dhtnode</RootNamespace>\r
+ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="Shared">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ <OutDir>$(ProjectDir)..\..\bin\x64</OutDir>\r
+ </PropertyGroup>\r
+ <!-- NOTE(review): both Win32 (x86) configurations below take libraries from $(ProjectDir)..\..\lib\x64 - confirm 32-bit import libraries really live there, otherwise x86 links will fail. -->\r
+ <!-- NOTE(review): sibling tool projects (dhtchat, dhtscanner) also define _CRT_SECURE_NO_WARNINGS; confirm its omission in this project is intentional. -->\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <!-- Release x64 pulls headers/libs from the contrib\build tree and links Argon2Ref.lib instead of blake.lib/argon.lib. -->\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;$(ProjectDir)contrib\build\include;$(ProjectDir)contrib\build\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>crypt32.lib;Argon2Ref.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)contrib\build\lib\x64;$(ProjectDir)..\..\lib\x64;$(ProjectDir)vs2015\build</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <!-- Sources: the tool entry point plus a local getopt implementation for Windows. -->\r
+ <ItemGroup>\r
+ <ClCompile Include="..\tools\dhtnode.cpp" />\r
+ <ClCompile Include="wingetopt.c" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\tools\tools_common.h" />\r
+ <ClInclude Include="wingetopt.h" />\r
+ </ItemGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<!-- Visual Studio 2015 (v140) project for the dhtscanner command-line tool (..\tools\dhtscanner.cpp); new file added by this patch. -->\r
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug|Win32">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|Win32">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug|x64">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|x64">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{78443BCD-4689-4007-A246-F8F34B27F561}</ProjectGuid>\r
+ <RootNamespace>dhtscanner</RootNamespace>\r
+ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="Shared">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <IntDir>$(ProjectName)\$(Platform)\$(Configuration)\</IntDir>\r
+ <OutDir>$(ProjectDir)..\..\bin\x64</OutDir>\r
+ </PropertyGroup>\r
+ <!-- NOTE(review): both Win32 (x86) configurations below take libraries from $(ProjectDir)..\..\lib\x64 - confirm 32-bit import libraries really live there, otherwise x86 links will fail. -->\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\..\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>crypt32.lib;blake.lib;argon.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)..\..\lib\x64</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <!-- Release x64 pulls headers/libs from the contrib\build tree and links Argon2Ref.lib instead of blake.lib/argon.lib. -->\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;$(ProjectDir)contrib\build\include;$(ProjectDir)contrib\build\msgpack-c\include;$(ProjectDir)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_NATIVE;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4267;4244;4800;4273;4101;</DisableSpecificWarnings>\r
+ <ProgramDataBaseFileName>$(IntDir)vc$(PlatformToolsetVersion).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>crypt32.lib;Argon2Ref.lib;libgnutls.lib;opendht.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <AdditionalLibraryDirectories>$(ProjectDir)contrib\build\lib\x64;$(ProjectDir)..\..\lib\x64;$(ProjectDir)vs2015\build</AdditionalLibraryDirectories>\r
+ <AdditionalOptions>/ignore:4049 %(AdditionalOptions)</AdditionalOptions>\r
+ <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <!-- Sources: the tool entry point plus a local getopt implementation for Windows. -->\r
+ <ItemGroup>\r
+ <ClCompile Include="..\tools\dhtscanner.cpp" />\r
+ <ClCompile Include="wingetopt.c" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\tools\tools_common.h" />\r
+ <ClInclude Include="wingetopt.h" />\r
+ </ItemGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+@echo on\r
+REM Wrapper that fetches and patches the third-party contrib sources before building.\r
+REM NOTE(review): "@echo on" echoes every command (handy on CI logs); confirm this is intended rather than "@echo off".\r
+call contrib\src\fetch_and_patch.bat
\ No newline at end of file
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 12.00\r
+# Visual Studio 14\r
+VisualStudioVersion = 14.0.25123.0\r
+MinimumVisualStudioVersion = 10.0.40219.1\r
+# NOTE(review): only the opendht project is declared in this solution, yet the\r
+# configuration sections below map four additional project GUIDs - confirm those\r
+# projects are added here, or prune the orphaned GUID mappings.\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "opendht", "opendht.vcxproj", "{711397CE-E5D5-467D-9457-8716C047E50C}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug|x64 = Debug|x64\r
+ Debug|x86 = Debug|x86\r
+ DebugDLL|x64 = DebugDLL|x64\r
+ DebugDLL|x86 = DebugDLL|x86\r
+ DebugDLLStaticDeps|x64 = DebugDLLStaticDeps|x64\r
+ DebugDLLStaticDeps|x86 = DebugDLLStaticDeps|x86\r
+ Release|x64 = Release|x64\r
+ Release|x86 = Release|x86\r
+ ReleaseDLL|x64 = ReleaseDLL|x64\r
+ ReleaseDLL|x86 = ReleaseDLL|x86\r
+ ReleaseDLLStaticDeps|x64 = ReleaseDLLStaticDeps|x64\r
+ ReleaseDLLStaticDeps|x86 = ReleaseDLLStaticDeps|x86\r
+ ReleaseLTO|x64 = ReleaseLTO|x64\r
+ ReleaseLTO|x86 = ReleaseLTO|x86\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x64.ActiveCfg = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x64.Build.0 = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x86.Build.0 = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLL|x64.ActiveCfg = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLL|x64.Build.0 = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLL|x86.ActiveCfg = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLL|x86.Build.0 = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLLStaticDeps|x64.ActiveCfg = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLLStaticDeps|x64.Build.0 = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLLStaticDeps|x86.ActiveCfg = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.DebugDLLStaticDeps|x86.Build.0 = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x64.ActiveCfg = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x64.Build.0 = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x86.ActiveCfg = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x86.Build.0 = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLL|x64.ActiveCfg = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLL|x64.Build.0 = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLL|x86.ActiveCfg = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLL|x86.Build.0 = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLLStaticDeps|x64.ActiveCfg = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLLStaticDeps|x64.Build.0 = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLLStaticDeps|x86.ActiveCfg = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseDLLStaticDeps|x86.Build.0 = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseLTO|x64.ActiveCfg = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseLTO|x64.Build.0 = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseLTO|x86.ActiveCfg = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseLTO|x86.Build.0 = Release|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x64.ActiveCfg = Debug|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x64.Build.0 = Debug|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x86.Build.0 = Debug|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLL|x64.ActiveCfg = DebugDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLL|x64.Build.0 = DebugDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLL|x86.ActiveCfg = DebugDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLL|x86.Build.0 = DebugDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLLStaticDeps|x64.ActiveCfg = DebugDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLLStaticDeps|x64.Build.0 = DebugDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLLStaticDeps|x86.ActiveCfg = DebugDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.DebugDLLStaticDeps|x86.Build.0 = DebugDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x64.ActiveCfg = Release|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x64.Build.0 = Release|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x86.ActiveCfg = Release|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x86.Build.0 = Release|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLL|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLL|x86.Build.0 = ReleaseDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLLStaticDeps|x64.ActiveCfg = ReleaseDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLLStaticDeps|x64.Build.0 = ReleaseDLL|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLLStaticDeps|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseDLLStaticDeps|x86.Build.0 = ReleaseDLL|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseLTO|x64.ActiveCfg = ReleaseLTO|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseLTO|x64.Build.0 = ReleaseLTO|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseLTO|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseLTO|x86.Build.0 = ReleaseLTO|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x64.ActiveCfg = Debug|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x64.Build.0 = Debug|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x86.Build.0 = Debug|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLL|x64.ActiveCfg = DebugDLL|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLL|x64.Build.0 = DebugDLL|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLL|x86.ActiveCfg = DebugDLL|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLL|x86.Build.0 = DebugDLL|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLLStaticDeps|x64.ActiveCfg = DebugDLLStaticDeps|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLLStaticDeps|x64.Build.0 = DebugDLLStaticDeps|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLLStaticDeps|x86.ActiveCfg = DebugDLLStaticDeps|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.DebugDLLStaticDeps|x86.Build.0 = DebugDLLStaticDeps|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x64.ActiveCfg = Release|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x64.Build.0 = Release|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x86.ActiveCfg = Release|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x86.Build.0 = Release|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLL|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLL|x86.Build.0 = ReleaseDLL|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLLStaticDeps|x64.ActiveCfg = ReleaseDLLStaticDeps|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLLStaticDeps|x64.Build.0 = ReleaseDLLStaticDeps|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLLStaticDeps|x86.ActiveCfg = ReleaseDLLStaticDeps|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseDLLStaticDeps|x86.Build.0 = ReleaseDLLStaticDeps|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseLTO|x64.ActiveCfg = ReleaseLTO|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseLTO|x64.Build.0 = ReleaseLTO|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseLTO|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseLTO|x86.Build.0 = ReleaseLTO|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x64.ActiveCfg = Debug|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x64.Build.0 = Debug|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x86.Build.0 = Debug|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLL|x64.ActiveCfg = DebugDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLL|x64.Build.0 = DebugDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLL|x86.ActiveCfg = DebugDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLL|x86.Build.0 = DebugDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLLStaticDeps|x64.ActiveCfg = DebugDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLLStaticDeps|x64.Build.0 = DebugDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLLStaticDeps|x86.ActiveCfg = DebugDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.DebugDLLStaticDeps|x86.Build.0 = DebugDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x64.ActiveCfg = Release|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x64.Build.0 = Release|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x86.ActiveCfg = Release|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x86.Build.0 = Release|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLL|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLL|x86.Build.0 = ReleaseDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLLStaticDeps|x64.ActiveCfg = ReleaseDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLLStaticDeps|x64.Build.0 = ReleaseDLL|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLLStaticDeps|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseDLLStaticDeps|x86.Build.0 = ReleaseDLL|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseLTO|x64.ActiveCfg = ReleaseLTO|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseLTO|x64.Build.0 = ReleaseLTO|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseLTO|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseLTO|x86.Build.0 = ReleaseLTO|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x64.ActiveCfg = Debug|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x64.Build.0 = Debug|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x86.Build.0 = Debug|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLL|x64.ActiveCfg = DebugDLL|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLL|x64.Build.0 = DebugDLL|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLL|x86.ActiveCfg = DebugDLL|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLL|x86.Build.0 = DebugDLL|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLLStaticDeps|x64.ActiveCfg = DebugDLLStaticDeps|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLLStaticDeps|x64.Build.0 = DebugDLLStaticDeps|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLLStaticDeps|x86.ActiveCfg = DebugDLLStaticDeps|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.DebugDLLStaticDeps|x86.Build.0 = DebugDLLStaticDeps|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x64.ActiveCfg = Release|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x64.Build.0 = Release|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x86.ActiveCfg = Release|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x86.Build.0 = Release|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLL|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLL|x86.Build.0 = ReleaseDLL|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLLStaticDeps|x64.ActiveCfg = ReleaseDLLStaticDeps|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLLStaticDeps|x64.Build.0 = ReleaseDLLStaticDeps|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLLStaticDeps|x86.ActiveCfg = ReleaseDLLStaticDeps|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseDLLStaticDeps|x86.Build.0 = ReleaseDLLStaticDeps|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseLTO|x64.ActiveCfg = ReleaseLTO|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseLTO|x64.Build.0 = ReleaseLTO|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseLTO|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseLTO|x86.Build.0 = ReleaseLTO|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x64.ActiveCfg = Debug|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x64.Build.0 = Debug|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x86.Build.0 = Debug|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLL|x64.ActiveCfg = DebugDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLL|x64.Build.0 = DebugDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLL|x86.ActiveCfg = DebugDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLL|x86.Build.0 = DebugDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLLStaticDeps|x64.ActiveCfg = DebugDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLLStaticDeps|x64.Build.0 = DebugDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLLStaticDeps|x86.ActiveCfg = DebugDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.DebugDLLStaticDeps|x86.Build.0 = DebugDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x64.ActiveCfg = Release|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x64.Build.0 = Release|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x86.ActiveCfg = Release|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x86.Build.0 = Release|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLL|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLL|x86.Build.0 = ReleaseDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLLStaticDeps|x64.ActiveCfg = ReleaseDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLLStaticDeps|x64.Build.0 = ReleaseDLL|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLLStaticDeps|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseDLLStaticDeps|x86.Build.0 = ReleaseDLL|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseLTO|x64.ActiveCfg = ReleaseLTO|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseLTO|x64.Build.0 = ReleaseLTO|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseLTO|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseLTO|x86.Build.0 = ReleaseLTO|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x64.ActiveCfg = Debug|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x64.Build.0 = Debug|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x86.Build.0 = Debug|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLL|x64.ActiveCfg = DebugDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLL|x64.Build.0 = DebugDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLL|x86.ActiveCfg = DebugDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLL|x86.Build.0 = DebugDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLLStaticDeps|x64.ActiveCfg = DebugDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLLStaticDeps|x64.Build.0 = DebugDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLLStaticDeps|x86.ActiveCfg = DebugDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.DebugDLLStaticDeps|x86.Build.0 = DebugDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x64.ActiveCfg = Release|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x64.Build.0 = Release|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x86.ActiveCfg = Release|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x86.Build.0 = Release|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLL|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLL|x86.Build.0 = ReleaseDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLLStaticDeps|x64.ActiveCfg = ReleaseDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLLStaticDeps|x64.Build.0 = ReleaseDLL|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLLStaticDeps|x86.ActiveCfg = ReleaseDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseDLLStaticDeps|x86.Build.0 = ReleaseDLL|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseLTO|x64.ActiveCfg = ReleaseLTO|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseLTO|x64.Build.0 = ReleaseLTO|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseLTO|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseLTO|x86.Build.0 = ReleaseLTO|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x64.ActiveCfg = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x64.Build.0 = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x86.Build.0 = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLL|x64.ActiveCfg = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLL|x64.Build.0 = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLL|x86.ActiveCfg = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLL|x86.Build.0 = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLLStaticDeps|x64.ActiveCfg = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLLStaticDeps|x64.Build.0 = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLLStaticDeps|x86.ActiveCfg = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.DebugDLLStaticDeps|x86.Build.0 = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x64.ActiveCfg = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x64.Build.0 = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x86.ActiveCfg = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x86.Build.0 = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLL|x64.ActiveCfg = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLL|x64.Build.0 = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLL|x86.ActiveCfg = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLL|x86.Build.0 = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLLStaticDeps|x64.ActiveCfg = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLLStaticDeps|x64.Build.0 = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLLStaticDeps|x86.ActiveCfg = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseDLLStaticDeps|x86.Build.0 = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseLTO|x64.ActiveCfg = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseLTO|x64.Build.0 = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseLTO|x86.ActiveCfg = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseLTO|x86.Build.0 = Release|Win32\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+ GlobalSection(NestedProjects) = preSolution\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug|Win32">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|Win32">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug|x64">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|x64">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="wingetopt.c" />\r
+ <ClCompile Include="..\src\base64.cpp" />\r
+ <ClCompile Include="..\src\callbacks.cpp" />\r
+ <ClCompile Include="..\src\crypto.cpp" />\r
+ <ClCompile Include="..\src\default_types.cpp" />\r
+ <ClCompile Include="..\src\dht.cpp" />\r
+ <ClCompile Include="..\src\dhtrunner.cpp" />\r
+ <ClCompile Include="..\src\dht_proxy_server.cpp" />\r
+ <ClCompile Include="..\src\indexation\pht.cpp" />\r
+ <ClCompile Include="..\src\infohash.cpp" />\r
+ <ClCompile Include="..\src\log.cpp" />\r
+ <ClCompile Include="..\src\network_engine.cpp" />\r
+ <ClCompile Include="..\src\node.cpp" />\r
+ <ClCompile Include="..\src\node_cache.cpp" />\r
+ <ClCompile Include="..\src\routing_table.cpp" />\r
+ <ClCompile Include="..\src\securedht.cpp" />\r
+ <ClCompile Include="..\src\utils.cpp" />\r
+ <ClCompile Include="..\src\value.cpp" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="unistd.h" />\r
+ <ClInclude Include="wingetopt.h" />\r
+ <ClInclude Include="..\include\opendht.h" />\r
+ <ClInclude Include="..\include\opendht\callbacks.h" />\r
+ <ClInclude Include="..\include\opendht\crypto.h" />\r
+ <ClInclude Include="..\include\opendht\def.h" />\r
+ <ClInclude Include="..\include\opendht\default_types.h" />\r
+ <ClInclude Include="..\include\opendht\dht.h" />\r
+ <ClInclude Include="..\include\opendht\dhtrunner.h" />\r
+ <ClInclude Include="..\include\opendht\dht_proxy_server.h" />\r
+ <ClInclude Include="..\include\opendht\indexation\pht.h" />\r
+ <ClInclude Include="..\include\opendht\infohash.h" />\r
+ <ClInclude Include="..\include\opendht\log.h" />\r
+ <ClInclude Include="..\include\opendht\log_enable.h" />\r
+ <ClInclude Include="..\include\opendht\network_engine.h" />\r
+ <ClInclude Include="..\include\opendht\node.h" />\r
+ <ClInclude Include="..\include\opendht\node_cache.h" />\r
+ <ClInclude Include="..\include\opendht\rate_limiter.h" />\r
+ <ClInclude Include="..\include\opendht\rng.h" />\r
+ <ClInclude Include="..\include\opendht\routing_table.h" />\r
+ <ClInclude Include="..\include\opendht\scheduler.h" />\r
+ <ClInclude Include="..\include\opendht\securedht.h" />\r
+ <ClInclude Include="..\include\opendht\sockaddr.h" />\r
+ <ClInclude Include="..\include\opendht\utils.h" />\r
+ <ClInclude Include="..\include\opendht\value.h" />\r
+ <ClInclude Include="..\src\base64.h" />\r
+ <ClInclude Include="..\src\listener.h" />\r
+ <ClInclude Include="..\src\net.h" />\r
+ <ClInclude Include="..\src\parsed_message.h" />\r
+ <ClInclude Include="..\src\request.h" />\r
+ <ClInclude Include="..\src\search.h" />\r
+ <ClInclude Include="..\src\storage.h" />\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{711397CE-E5D5-467D-9457-8716C047E50C}</ProjectGuid>\r
+ <RootNamespace>opendht</RootNamespace>\r
+ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>false</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v140</PlatformToolset>\r
+ <WholeProgramOptimization>false</WholeProgramOptimization>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="Shared">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>\r
+ <TargetName>opendhtd</TargetName>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>\r
+ <TargetName>opendht</TargetName>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>\r
+ <TargetName>opendhtd</TargetName>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <OutDir>$(ProjectDir)..\..\</OutDir>\r
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>\r
+ <TargetName>opendht</TargetName>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\include;..\include\opendht;..\..\msgpack-c\include;</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4800;4101;4267;4244;4503;</DisableSpecificWarnings>\r
+ <AdditionalOptions>-D_SCL_SECURE_NO_WARNINGS %(AdditionalOptions)</AdditionalOptions>\r
+ <SuppressStartupBanner>false</SuppressStartupBanner>\r
+ </ClCompile>\r
+ <PostBuildEvent>\r
+ <Command>mkdir $(OutDir)\include\r
+mkdir $(OutDir)\include\opendht\r
+mkdir $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\indexation\*.h $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\*.h $(OutDir)\include\opendht\r
+copy ..\include\opendht.h $(OutDir)\include\</Command>\r
+ </PostBuildEvent>\r
+ <Lib>\r
+ <OutputFile>$(OutDir)\lib\x86\$(TargetName)$(TargetExt)</OutputFile>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Disabled</Optimization>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\include;..\include\opendht;..\..\msgpack-c\include;</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4800;4101;4267;4244;4503;</DisableSpecificWarnings>\r
+ <AdditionalOptions>-D_SCL_SECURE_NO_WARNINGS %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <PostBuildEvent>\r
+ <Command>mkdir $(OutDir)\include\r
+mkdir $(OutDir)\include\opendht\r
+mkdir $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\indexation\*.h $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\*.h $(OutDir)\include\opendht\r
+copy ..\include\opendht.h $(OutDir)\include\</Command>\r
+ </PostBuildEvent>\r
+ <Lib>\r
+ <OutputFile>$(OutDir)\lib\x64\$(TargetName)$(TargetExt)</OutputFile>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>..\..\include;..\include;..\include\opendht;..\..\msgpack-c\include;</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4800;4101;4267;4244;4503;</DisableSpecificWarnings>\r
+ <AdditionalOptions>-D_SCL_SECURE_NO_WARNINGS %(AdditionalOptions)</AdditionalOptions>\r
+ <ProgramDataBaseFileName>$(OutDir)\lib\x86\$(TargetName).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <PostBuildEvent>\r
+ <Command>mkdir $(OutDir)\include\r
+mkdir $(OutDir)\include\opendht\r
+mkdir $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\indexation\*.h $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\*.h $(OutDir)\include\opendht\r
+copy ..\include\opendht.h $(OutDir)\include\</Command>\r
+ </PostBuildEvent>\r
+ <Lib>\r
+ <OutputFile>$(OutDir)\lib\x86\$(TargetName)$(TargetExt)</OutputFile>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <ClCompile>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <Optimization>Full</Optimization>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <SDLCheck>true</SDLCheck>\r
+ <AdditionalIncludeDirectories>$(ProjectDir)contrib\build\include;$(ProjectDir)..\include;$(ProjectDir)..\include\opendht;$(ProjectDir)contrib\build\msgpack-c\include;$(ProjectDir)contrib\build\argon2\include;$(ProjectDir)..\..\include;$(ProjectDir)..\..\argon2\include</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <DisableSpecificWarnings>4804;4800;4101;4267;4244;4503;4273;</DisableSpecificWarnings>\r
+ <AdditionalOptions>-D_SCL_SECURE_NO_WARNINGS %(AdditionalOptions)</AdditionalOptions>\r
+ <ProgramDataBaseFileName>$(OutDir)\lib\x64\$(TargetName).pdb</ProgramDataBaseFileName>\r
+ </ClCompile>\r
+ <Link>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <PostBuildEvent>\r
+ <Command>mkdir $(OutDir)\include\r
+mkdir $(OutDir)\include\opendht\r
+mkdir $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\indexation\*.h $(OutDir)\include\opendht\indexation\r
+copy ..\include\opendht\*.h $(OutDir)\include\opendht\r
+copy ..\include\opendht.h $(OutDir)\include\</Command>\r
+ </PostBuildEvent>\r
+ <Lib>\r
+ <OutputFile>$(OutDir)\lib\x64\$(TargetName)$(TargetExt)</OutputFile>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <AdditionalLibraryDirectories>\r
+ </AdditionalLibraryDirectories>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup>\r
+ <ClCompile Include="..\src\callbacks.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\crypto.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\default_types.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\dht.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\dhtrunner.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\infohash.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\log.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\network_engine.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\node.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\node_cache.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\routing_table.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\securedht.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\utils.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\value.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\base64.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\dht_proxy_server.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\src\indexation\pht.cpp">\r
+ <Filter>Source Files\indexation</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="wingetopt.c" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\include\opendht\def.h" />\r
+ <ClInclude Include="..\include\opendht\sockaddr.h" />\r
+ <ClInclude Include="..\src\net.h" />\r
+ <ClInclude Include="..\src\parsed_message.h" />\r
+ <ClInclude Include="..\src\base64.h">\r
+ <Filter>Source Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\src\listener.h">\r
+ <Filter>Source Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\src\request.h">\r
+ <Filter>Source Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\src\search.h">\r
+ <Filter>Source Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\src\storage.h">\r
+ <Filter>Source Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\callbacks.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\crypto.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\default_types.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\dht.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\dht_proxy_server.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\dhtrunner.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\infohash.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\log.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\log_enable.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\network_engine.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\node.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\node_cache.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\rate_limiter.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\rng.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\routing_table.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\scheduler.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\securedht.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\utils.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\value.h">\r
+ <Filter>Header Files\opendht</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\include\opendht\indexation\pht.h">\r
+ <Filter>Header Files\opendht\indexation</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="unistd.h" />\r
+ <ClInclude Include="wingetopt.h" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <Filter Include="Source Files">\r
+ <UniqueIdentifier>{be73d179-6cf2-4c05-8d91-53c8499a7134}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Header Files">\r
+ <UniqueIdentifier>{d1ab5bfe-3ab1-45ee-9324-b4b071887668}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\indexation">\r
+ <UniqueIdentifier>{939134d3-d5a2-4458-bca8-a10ea9042ea2}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Header Files\opendht">\r
+ <UniqueIdentifier>{0b46e2d0-4874-404c-bae3-fae8d49b85b8}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Header Files\opendht\indexation">\r
+ <UniqueIdentifier>{03bd5139-2ae5-41b0-a77b-944814e1b2b6}</UniqueIdentifier>\r
+ </Filter>\r
+ </ItemGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 12.00\r
+# Visual Studio 14\r
+VisualStudioVersion = 14.0.25420.1\r
+MinimumVisualStudioVersion = 10.0.40219.1\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "opendht", "opendht.vcxproj", "{711397CE-E5D5-467D-9457-8716C047E50C}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518} = {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}\r
+ {122A2EA4-B283-4241-9655-786DE78283B2} = {122A2EA4-B283-4241-9655-786DE78283B2}\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2} = {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02} = {6F610EE8-7F59-4191-AB88-F63843267C02}\r
+ EndProjectSection\r
+EndProject\r
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "dependencies", "dependencies", "{71A1A9EC-1104-4335-A87E-AF8749FD5B34}"\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libgmp", "contrib\build\gmp\SMP\libgmp.vcxproj", "{02B94302-23D6-43EF-8865-95CDE99D5DC2}"\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libgnutls", "contrib\build\gnutls\SMP\libgnutls.vcxproj", "{6F610EE8-7F59-4191-AB88-F63843267C02}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2} = {02B94302-23D6-43EF-8865-95CDE99D5DC2}\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518} = {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32} = {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C} = {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B} = {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libiconv", "contrib\build\libiconv\SMP\libiconv.vcxproj", "{CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}"\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libhogweed", "contrib\build\nettle\SMP\libhogweed.vcxproj", "{200F3D3C-8B84-46D8-953A-6C0EBD283B5C}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2} = {02B94302-23D6-43EF-8865-95CDE99D5DC2}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libnettle", "contrib\build\nettle\SMP\libnettle.vcxproj", "{070FEF2B-0C3F-4F33-9D3C-53C7330BF518}"\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libzlib", "contrib\build\zlib\SMP\libzlib.vcxproj", "{CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}"\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MessagePack", "contrib\build\msgpack-c\msgpack_vc8.vcxproj", "{122A2EA4-B283-4241-9655-786DE78283B2}"\r
+EndProject\r
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{296E2989-CF09-43AC-BBD2-BCF1A4B56D07}"\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "dhtnode", "dhtnode.vcxproj", "{BF92AECF-AA1D-4B05-9D00-0247E92A24B5}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {711397CE-E5D5-467D-9457-8716C047E50C} = {711397CE-E5D5-467D-9457-8716C047E50C}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "dhtscanner", "dhtscanner.vcxproj", "{78443BCD-4689-4007-A246-F8F34B27F561}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {711397CE-E5D5-467D-9457-8716C047E50C} = {711397CE-E5D5-467D-9457-8716C047E50C}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "dhtchat", "dhtchat.vcxproj", "{8BE7F14D-B227-4D54-9105-7E5473F2D0BA}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {711397CE-E5D5-467D-9457-8716C047E50C} = {711397CE-E5D5-467D-9457-8716C047E50C}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2Ref", "contrib\build\argon2\vs2015\Argon2Ref\Argon2Ref.vcxproj", "{B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug|x64 = Debug|x64\r
+ Debug|x86 = Debug|x86\r
+ Release|x64 = Release|x64\r
+ Release|x86 = Release|x86\r
+ ReleaseStatic|x64 = ReleaseStatic|x64\r
+ ReleaseStatic|x86 = ReleaseStatic|x86\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x64.ActiveCfg = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x64.Build.0 = Debug|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Debug|x86.Build.0 = Debug|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x64.ActiveCfg = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x64.Build.0 = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x86.ActiveCfg = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.Release|x86.Build.0 = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseStatic|x64.ActiveCfg = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseStatic|x64.Build.0 = Release|x64\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseStatic|x86.ActiveCfg = Release|Win32\r
+ {711397CE-E5D5-467D-9457-8716C047E50C}.ReleaseStatic|x86.Build.0 = Release|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x64.ActiveCfg = Debug|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x64.Build.0 = Debug|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Debug|x86.Build.0 = Debug|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x64.ActiveCfg = Release|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x64.Build.0 = Release|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x86.ActiveCfg = Release|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.Release|x86.Build.0 = Release|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseStatic|x64.ActiveCfg = ReleaseLTO|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseStatic|x64.Build.0 = ReleaseLTO|x64\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseStatic|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2}.ReleaseStatic|x86.Build.0 = ReleaseLTO|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x64.ActiveCfg = Debug|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x64.Build.0 = Debug|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Debug|x86.Build.0 = Debug|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x64.ActiveCfg = Release|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x64.Build.0 = Release|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x86.ActiveCfg = Release|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.Release|x86.Build.0 = Release|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseStatic|x64.ActiveCfg = ReleaseLTO|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseStatic|x64.Build.0 = ReleaseLTO|x64\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseStatic|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02}.ReleaseStatic|x86.Build.0 = ReleaseLTO|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x64.ActiveCfg = Debug|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x64.Build.0 = Debug|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Debug|x86.Build.0 = Debug|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x64.ActiveCfg = Release|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x64.Build.0 = Release|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x86.ActiveCfg = Release|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.Release|x86.Build.0 = Release|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseStatic|x64.ActiveCfg = ReleaseLTO|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseStatic|x64.Build.0 = ReleaseLTO|x64\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseStatic|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B}.ReleaseStatic|x86.Build.0 = ReleaseLTO|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x64.ActiveCfg = Debug|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x64.Build.0 = Debug|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Debug|x86.Build.0 = Debug|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x64.ActiveCfg = Release|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x64.Build.0 = Release|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x86.ActiveCfg = Release|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.Release|x86.Build.0 = Release|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseStatic|x64.ActiveCfg = ReleaseLTO|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseStatic|x64.Build.0 = ReleaseLTO|x64\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseStatic|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C}.ReleaseStatic|x86.Build.0 = ReleaseLTO|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x64.ActiveCfg = Debug|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x64.Build.0 = Debug|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Debug|x86.Build.0 = Debug|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x64.ActiveCfg = Release|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x64.Build.0 = Release|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x86.ActiveCfg = Release|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.Release|x86.Build.0 = Release|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseStatic|x64.ActiveCfg = ReleaseLTO|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseStatic|x64.Build.0 = ReleaseLTO|x64\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseStatic|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518}.ReleaseStatic|x86.Build.0 = ReleaseLTO|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x64.ActiveCfg = Debug|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x64.Build.0 = Debug|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Debug|x86.Build.0 = Debug|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x64.ActiveCfg = Release|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x64.Build.0 = Release|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x86.ActiveCfg = Release|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.Release|x86.Build.0 = Release|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseStatic|x64.ActiveCfg = ReleaseLTO|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseStatic|x64.Build.0 = ReleaseLTO|x64\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseStatic|x86.ActiveCfg = ReleaseLTO|Win32\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32}.ReleaseStatic|x86.Build.0 = ReleaseLTO|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x64.ActiveCfg = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x64.Build.0 = Debug|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Debug|x86.Build.0 = Debug|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x64.ActiveCfg = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x64.Build.0 = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x86.ActiveCfg = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.Release|x86.Build.0 = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseStatic|x64.ActiveCfg = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseStatic|x64.Build.0 = Release|x64\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseStatic|x86.ActiveCfg = Release|Win32\r
+ {122A2EA4-B283-4241-9655-786DE78283B2}.ReleaseStatic|x86.Build.0 = Release|Win32\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Debug|x64.ActiveCfg = Debug|x64\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Debug|x64.Build.0 = Debug|x64\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Debug|x86.Build.0 = Debug|Win32\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Release|x64.ActiveCfg = Release|x64\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Release|x64.Build.0 = Release|x64\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Release|x86.ActiveCfg = Release|Win32\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.Release|x86.Build.0 = Release|Win32\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.ReleaseStatic|x64.ActiveCfg = Release|x64\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.ReleaseStatic|x64.Build.0 = Release|x64\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.ReleaseStatic|x86.ActiveCfg = Release|Win32\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5}.ReleaseStatic|x86.Build.0 = Release|Win32\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Debug|x64.ActiveCfg = Debug|x64\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Debug|x64.Build.0 = Debug|x64\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Debug|x86.Build.0 = Debug|Win32\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Release|x64.ActiveCfg = Release|x64\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Release|x64.Build.0 = Release|x64\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Release|x86.ActiveCfg = Release|Win32\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.Release|x86.Build.0 = Release|Win32\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.ReleaseStatic|x64.ActiveCfg = Release|x64\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.ReleaseStatic|x64.Build.0 = Release|x64\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.ReleaseStatic|x86.ActiveCfg = Release|Win32\r
+ {78443BCD-4689-4007-A246-F8F34B27F561}.ReleaseStatic|x86.Build.0 = Release|Win32\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Debug|x64.ActiveCfg = Debug|x64\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Debug|x64.Build.0 = Debug|x64\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Debug|x86.Build.0 = Debug|Win32\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Release|x64.ActiveCfg = Release|x64\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Release|x64.Build.0 = Release|x64\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Release|x86.ActiveCfg = Release|Win32\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.Release|x86.Build.0 = Release|Win32\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.ReleaseStatic|x64.ActiveCfg = Release|x64\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.ReleaseStatic|x64.Build.0 = Release|x64\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.ReleaseStatic|x86.ActiveCfg = Release|Win32\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA}.ReleaseStatic|x86.Build.0 = Release|Win32\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x64.ActiveCfg = Debug|x64\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x64.Build.0 = Debug|x64\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x86.ActiveCfg = Debug|Win32\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x86.Build.0 = Debug|Win32\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x64.ActiveCfg = Release|x64\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x64.Build.0 = Release|x64\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x86.ActiveCfg = Release|Win32\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x86.Build.0 = Release|Win32\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+ GlobalSection(NestedProjects) = preSolution\r
+ {02B94302-23D6-43EF-8865-95CDE99D5DC2} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ {6F610EE8-7F59-4191-AB88-F63843267C02} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ {CB8BB76F-D3FF-434E-A85E-7FFC0893EC9B} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ {200F3D3C-8B84-46D8-953A-6C0EBD283B5C} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ {070FEF2B-0C3F-4F33-9D3C-53C7330BF518} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ {CA9A4A38-CC63-4BDB-8CFB-E058965DDA32} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ {122A2EA4-B283-4241-9655-786DE78283B2} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ {BF92AECF-AA1D-4B05-9D00-0247E92A24B5} = {296E2989-CF09-43AC-BBD2-BCF1A4B56D07}\r
+ {78443BCD-4689-4007-A246-F8F34B27F561} = {296E2989-CF09-43AC-BBD2-BCF1A4B56D07}\r
+ {8BE7F14D-B227-4D54-9105-7E5473F2D0BA} = {296E2989-CF09-43AC-BBD2-BCF1A4B56D07}\r
+ {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2} = {71A1A9EC-1104-4335-A87E-AF8749FD5B34}\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+/*
+ * Copyright (C) 2016 Savoir-faire Linux Inc.
+ *
+ * Author: Andreas Traczyk <andreas.traczyk@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#pragma once
+
+// Minimal substitute for POSIX <unistd.h> when building with MSVC.
+// Guarded on __STRICT_ANSI__: GCC-style compilers define it in strict-ANSI
+// mode, where the real <unistd.h> is expected to be available instead.
+#ifndef __STRICT_ANSI__
+
+#include <stdlib.h>
+#include <process.h>
+#include <direct.h>
+#include <fcntl.h>
+
+// access() mode flags; values match POSIX.
+#define R_OK 4 /* test for read permission */
+#define W_OK 2 /* test for write permission */
+#define F_OK 0 /* test for existence */
+
+// Standard file descriptor numbers (normally from <unistd.h>).
+#ifndef STDIN_FILENO
+#define STDIN_FILENO 0
+#endif
+
+#ifndef STDOUT_FILENO
+#define STDOUT_FILENO 1
+#endif
+
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+
+// Map the BSD random() API onto the CRT equivalents.
+// NOTE(review): MSVC rand() yields at most 15 bits while random() gives
+// 31 — confirm no caller depends on the wider range.
+#define srandom srand
+#define random rand
+
+#define inline __inline /* MSVC's C mode spells the keyword __inline */
+typedef int mode_t; /* POSIX file-mode type; plain int suffices here */
+#include <BaseTsd.h>
+
+#endif
--- /dev/null
+/* Getopt for Microsoft C\r
+This code is a modification of the Free Software Foundation, Inc.\r
+Getopt library for parsing command line argument the purpose was\r
+to provide a Microsoft Visual C friendly derivative. This code\r
+provides functionality for both Unicode and Multibyte builds.\r
+Date: 02/03/2011 - Ludvik Jerabek - Initial Release\r
+Version: 1.0\r
+Comment: Supports getopt, getopt_long, and getopt_long_only\r
+and POSIXLY_CORRECT environment flag\r
+License: LGPL\r
+Revisions:\r
+02/03/2011 - Ludvik Jerabek - Initial Release\r
+02/20/2011 - Ludvik Jerabek - Fixed compiler warnings at Level 4\r
+07/05/2011 - Ludvik Jerabek - Added no_argument, required_argument, optional_argument defs\r
+08/03/2011 - Ludvik Jerabek - Fixed non-argument runtime bug which caused runtime exception\r
+08/09/2011 - Ludvik Jerabek - Added code to export functions for DLL and LIB\r
+02/15/2012 - Ludvik Jerabek - Fixed _GETOPT_THROW definition missing in implementation file\r
+08/01/2012 - Ludvik Jerabek - Created separate functions for char and wchar_t characters so single dll can do both unicode and ansi\r
+10/15/2012 - Ludvik Jerabek - Modified to match latest GNU features\r
+06/19/2015 - Ludvik Jerabek - Fixed maximum option limitation caused by option_a (255) and option_w (65535) structure val variable\r
+**DISCLAIMER**\r
+THIS MATERIAL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,\r
+EITHER EXPRESS OR IMPLIED, INCLUDING, BUT Not LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\r
+PURPOSE, OR NON-INFRINGEMENT. SOME JURISDICTIONS DO NOT ALLOW THE\r
+EXCLUSION OF IMPLIED WARRANTIES, SO THE ABOVE EXCLUSION MAY NOT\r
+APPLY TO YOU. IN NO EVENT WILL I BE LIABLE TO ANY PARTY FOR ANY\r
+DIRECT, INDIRECT, SPECIAL OR OTHER CONSEQUENTIAL DAMAGES FOR ANY\r
+USE OF THIS MATERIAL INCLUDING, WITHOUT LIMITATION, ANY LOST\r
+PROFITS, BUSINESS INTERRUPTION, LOSS OF PROGRAMS OR OTHER DATA ON\r
+YOUR INFORMATION HANDLING SYSTEM OR OTHERWISE, EVEN If WE ARE\r
+EXPRESSLY ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\r
+*/\r
+#ifndef _CRT_SECURE_NO_WARNINGS\r
+# define _CRT_SECURE_NO_WARNINGS\r
+#endif\r
+#include <stdlib.h>\r
+#include <stdio.h>\r
+#include <malloc.h>\r
+#include "wingetopt.h"\r
+\r
+#ifdef __cplusplus\r
+ #define _GETOPT_THROW throw()\r
+#else\r
+ #define _GETOPT_THROW\r
+#endif\r
+\r
+int optind = 1;\r
+int opterr = 1;\r
+int optopt = '?';\r
+enum ENUM_ORDERING { REQUIRE_ORDER, PERMUTE, RETURN_IN_ORDER };\r
+\r
+//\r
+//\r
+// Ansi structures and functions follow\r
+//\r
+//\r
+\r
+// Per-scan parser state, ANSI (char) flavor. One static instance backs the\r
+// non-reentrant getopt_a entry points; the _r_ variants take a caller-owned\r
+// structure instead.\r
+static struct _getopt_data_a\r
+{\r
+ int optind; /* index of next argv element to scan */\r
+ int opterr; /* nonzero => print diagnostics */\r
+ int optopt; /* option char that caused the last error */\r
+ char *optarg; /* argument of the last option parsed */\r
+ int __initialized; /* set once _getopt_initialize_a has run */\r
+ char *__nextchar; /* next short-option char within current argv element */\r
+ enum ENUM_ORDERING __ordering;\r
+ int __posixly_correct;\r
+ int __first_nonopt; /* bounds of the skipped non-option block */\r
+ int __last_nonopt;\r
+} getopt_data_a;\r
+// Public mirror of getopt_data_a.optarg for the ANSI API.\r
+char *optarg_a;\r
+\r
+// Rotate the block of already-skipped non-option arguments\r
+// [__first_nonopt, __last_nonopt) past the options scanned after it, so\r
+// that with PERMUTE ordering all non-options end up at the end of argv.\r
+// Each pass swaps the shorter of the two adjacent segments; the nonopt\r
+// bounds are updated to describe the moved block afterwards.\r
+static void exchange_a(char **argv, struct _getopt_data_a *d)\r
+{\r
+ int bottom = d->__first_nonopt;\r
+ int middle = d->__last_nonopt;\r
+ int top = d->optind;\r
+ char *tem;\r
+ while (top > middle && middle > bottom)\r
+ {\r
+ if (top - middle > middle - bottom)\r
+ {\r
+ // Option segment is longer: move the non-options up into its tail.\r
+ int len = middle - bottom;\r
+ register int i;\r
+ for (i = 0; i < len; i++)\r
+ {\r
+ tem = argv[bottom + i];\r
+ argv[bottom + i] = argv[top - (middle - bottom) + i];\r
+ argv[top - (middle - bottom) + i] = tem;\r
+ }\r
+ top -= len;\r
+ }\r
+ else\r
+ {\r
+ // Non-option segment is longer or equal: move the options down.\r
+ int len = top - middle;\r
+ register int i;\r
+ for (i = 0; i < len; i++)\r
+ {\r
+ tem = argv[bottom + i];\r
+ argv[bottom + i] = argv[middle + i];\r
+ argv[middle + i] = tem;\r
+ }\r
+ bottom += len;\r
+ }\r
+ }\r
+ // The non-option block now sits immediately before optind.\r
+ d->__first_nonopt += (d->optind - d->__last_nonopt);\r
+ d->__last_nonopt = d->optind;\r
+}\r
+// Prepare state for a fresh scan: record where non-options start and pick\r
+// the ordering mode from the optstring prefix ('-' => RETURN_IN_ORDER,\r
+// '+' => REQUIRE_ORDER), from the posixly_correct flag or the\r
+// POSIXLY_CORRECT environment variable (REQUIRE_ORDER), else PERMUTE.\r
+// Returns optstring advanced past any consumed prefix character.\r
+static const char *_getopt_initialize_a (const char *optstring, struct _getopt_data_a *d, int posixly_correct)\r
+{\r
+ d->__first_nonopt = d->__last_nonopt = d->optind;\r
+ d->__nextchar = NULL;\r
+ d->__posixly_correct = posixly_correct | !!getenv("POSIXLY_CORRECT");\r
+ if (optstring[0] == '-')\r
+ {\r
+ d->__ordering = RETURN_IN_ORDER;\r
+ ++optstring;\r
+ }\r
+ else if (optstring[0] == '+')\r
+ {\r
+ d->__ordering = REQUIRE_ORDER;\r
+ ++optstring;\r
+ }\r
+ else if (d->__posixly_correct)\r
+ d->__ordering = REQUIRE_ORDER;\r
+ else\r
+ d->__ordering = PERMUTE;\r
+ return optstring;\r
+}\r
+// Core option scanner, ANSI (char) version; reentrant via the caller's\r
+// _getopt_data_a block. Returns the option character found, 0 when a long\r
+// option stored through its flag pointer, 1 for a non-option argument in\r
+// RETURN_IN_ORDER mode, ':' (with leading-':' optstring) or '?' on a\r
+// missing argument or error, and -1 when the argument list is exhausted.\r
+int _getopt_internal_r_a (int argc, char *const *argv, const char *optstring, const struct option_a *longopts, int *longind, int long_only, struct _getopt_data_a *d, int posixly_correct)\r
+{\r
+ int print_errors = d->opterr;\r
+ if (argc < 1)\r
+ return -1;\r
+ d->optarg = NULL;\r
+ if (d->optind == 0 || !d->__initialized)\r
+ {\r
+ if (d->optind == 0)\r
+ d->optind = 1;\r
+ optstring = _getopt_initialize_a (optstring, d, posixly_correct);\r
+ d->__initialized = 1;\r
+ }\r
+ else if (optstring[0] == '-' || optstring[0] == '+')\r
+ optstring++;\r
+ if (optstring[0] == ':')\r
+ print_errors = 0;\r
+ // Advance to the next argv element, permuting non-options out of the\r
+ // way if needed; handle "--", end-of-argv and bare non-options first.\r
+ if (d->__nextchar == NULL || *d->__nextchar == '\0')\r
+ {\r
+ if (d->__last_nonopt > d->optind)\r
+ d->__last_nonopt = d->optind;\r
+ if (d->__first_nonopt > d->optind)\r
+ d->__first_nonopt = d->optind;\r
+ if (d->__ordering == PERMUTE)\r
+ {\r
+ if (d->__first_nonopt != d->__last_nonopt && d->__last_nonopt != d->optind)\r
+ exchange_a ((char **) argv, d);\r
+ else if (d->__last_nonopt != d->optind)\r
+ d->__first_nonopt = d->optind;\r
+ while (d->optind < argc && (argv[d->optind][0] != '-' || argv[d->optind][1] == '\0'))\r
+ d->optind++;\r
+ d->__last_nonopt = d->optind;\r
+ }\r
+ if (d->optind != argc && !strcmp(argv[d->optind], "--"))\r
+ {\r
+ d->optind++;\r
+ if (d->__first_nonopt != d->__last_nonopt && d->__last_nonopt != d->optind)\r
+ exchange_a((char **) argv, d);\r
+ else if (d->__first_nonopt == d->__last_nonopt)\r
+ d->__first_nonopt = d->optind;\r
+ d->__last_nonopt = argc;\r
+ d->optind = argc;\r
+ }\r
+ if (d->optind == argc)\r
+ {\r
+ if (d->__first_nonopt != d->__last_nonopt)\r
+ d->optind = d->__first_nonopt;\r
+ return -1;\r
+ }\r
+ if ((argv[d->optind][0] != '-' || argv[d->optind][1] == '\0'))\r
+ {\r
+ if (d->__ordering == REQUIRE_ORDER)\r
+ return -1;\r
+ d->optarg = argv[d->optind++];\r
+ return 1;\r
+ }\r
+ d->__nextchar = (argv[d->optind] + 1 + (longopts != NULL && argv[d->optind][1] == '-'));\r
+ }\r
+ // Decode a long option ("--name[=arg]", or "-name" in long_only mode),\r
+ // accepting unambiguous abbreviations of the long option names.\r
+ if (longopts != NULL && (argv[d->optind][1] == '-' || (long_only && (argv[d->optind][2] || !strchr(optstring, argv[d->optind][1])))))\r
+ {\r
+ char *nameend;\r
+ unsigned int namelen;\r
+ const struct option_a *p;\r
+ const struct option_a *pfound = NULL;\r
+ struct option_list\r
+ {\r
+ const struct option_a *p;\r
+ struct option_list *next;\r
+ } *ambig_list = NULL;\r
+ int exact = 0;\r
+ int indfound = -1;\r
+ int option_index;\r
+ for (nameend = d->__nextchar; *nameend && *nameend != '='; nameend++);\r
+ namelen = (unsigned int)(nameend - d->__nextchar);\r
+ for (p = longopts, option_index = 0; p->name; p++, option_index++)\r
+ if (!strncmp(p->name, d->__nextchar, namelen))\r
+ {\r
+ if (namelen == (unsigned int)strlen(p->name))\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ exact = 1;\r
+ break;\r
+ }\r
+ else if (pfound == NULL)\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ }\r
+ else if (long_only || pfound->has_arg != p->has_arg || pfound->flag != p->flag || pfound->val != p->val)\r
+ {\r
+ struct option_list *newp = (struct option_list*)alloca(sizeof(*newp));\r
+ newp->p = p;\r
+ newp->next = ambig_list;\r
+ ambig_list = newp;\r
+ }\r
+ }\r
+ if (ambig_list != NULL && !exact)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ struct option_list first;\r
+ first.p = pfound;\r
+ first.next = ambig_list;\r
+ ambig_list = &first;\r
+ fprintf (stderr, "%s: option '%s' is ambiguous; possibilities:", argv[0], argv[d->optind]);\r
+ do\r
+ {\r
+ fprintf (stderr, " '--%s'", ambig_list->p->name);\r
+ ambig_list = ambig_list->next;\r
+ }\r
+ while (ambig_list != NULL);\r
+ fputc ('\n', stderr);\r
+ }\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ d->optind++;\r
+ d->optopt = 0;\r
+ return '?';\r
+ }\r
+ if (pfound != NULL)\r
+ {\r
+ option_index = indfound;\r
+ d->optind++;\r
+ if (*nameend)\r
+ {\r
+ if (pfound->has_arg)\r
+ d->optarg = nameend + 1;\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ if (argv[d->optind - 1][1] == '-')\r
+ {\r
+ fprintf(stderr, "%s: option '--%s' doesn't allow an argument\n",argv[0], pfound->name);\r
+ }\r
+ else\r
+ {\r
+ fprintf(stderr, "%s: option '%c%s' doesn't allow an argument\n",argv[0], argv[d->optind - 1][0],pfound->name);\r
+ }\r
+ }\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ d->optopt = pfound->val;\r
+ return '?';\r
+ }\r
+ }\r
+ else if (pfound->has_arg == 1)\r
+ {\r
+ if (d->optind < argc)\r
+ d->optarg = argv[d->optind++];\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fprintf(stderr,"%s: option '--%s' requires an argument\n",argv[0], pfound->name);\r
+ }\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ d->optopt = pfound->val;\r
+ return optstring[0] == ':' ? ':' : '?';\r
+ }\r
+ }\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ if (longind != NULL)\r
+ *longind = option_index;\r
+ if (pfound->flag)\r
+ {\r
+ *(pfound->flag) = pfound->val;\r
+ return 0;\r
+ }\r
+ return pfound->val;\r
+ }\r
+ if (!long_only || argv[d->optind][1] == '-' || strchr(optstring, *d->__nextchar) == NULL)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ if (argv[d->optind][1] == '-')\r
+ {\r
+ fprintf(stderr, "%s: unrecognized option '--%s'\n",argv[0], d->__nextchar);\r
+ }\r
+ else\r
+ {\r
+ fprintf(stderr, "%s: unrecognized option '%c%s'\n",argv[0], argv[d->optind][0], d->__nextchar);\r
+ }\r
+ }\r
+ d->__nextchar = (char *)"";\r
+ d->optind++;\r
+ d->optopt = 0;\r
+ return '?';\r
+ }\r
+ }\r
+ // Decode the next short option character from __nextchar.\r
+ {\r
+ char c = *d->__nextchar++;\r
+ char *temp = (char*)strchr(optstring, c);\r
+ if (*d->__nextchar == '\0')\r
+ ++d->optind;\r
+ if (temp == NULL || c == ':' || c == ';')\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fprintf(stderr, "%s: invalid option -- '%c'\n", argv[0], c);\r
+ }\r
+ d->optopt = c;\r
+ return '?';\r
+ }\r
+ // "W;" in optstring: "-W foo" is treated like "--foo".\r
+ if (temp[0] == 'W' && temp[1] == ';')\r
+ {\r
+ char *nameend;\r
+ const struct option_a *p;\r
+ const struct option_a *pfound = NULL;\r
+ int exact = 0;\r
+ int ambig = 0;\r
+ int indfound = 0;\r
+ int option_index;\r
+ if (longopts == NULL)\r
+ goto no_longs;\r
+ if (*d->__nextchar != '\0')\r
+ {\r
+ d->optarg = d->__nextchar;\r
+ d->optind++;\r
+ }\r
+ else if (d->optind == argc)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fprintf(stderr,"%s: option requires an argument -- '%c'\n",argv[0], c);\r
+ }\r
+ d->optopt = c;\r
+ if (optstring[0] == ':')\r
+ c = ':';\r
+ else\r
+ c = '?';\r
+ return c;\r
+ }\r
+ else\r
+ d->optarg = argv[d->optind++];\r
+ for (d->__nextchar = nameend = d->optarg; *nameend && *nameend != '='; nameend++);\r
+ for (p = longopts, option_index = 0; p->name; p++, option_index++)\r
+ if (!strncmp(p->name, d->__nextchar, nameend - d->__nextchar))\r
+ {\r
+ if ((unsigned int) (nameend - d->__nextchar) == strlen(p->name))\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ exact = 1;\r
+ break;\r
+ }\r
+ else if (pfound == NULL)\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ }\r
+ else if (long_only || pfound->has_arg != p->has_arg || pfound->flag != p->flag || pfound->val != p->val)\r
+ ambig = 1;\r
+ }\r
+ if (ambig && !exact)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fprintf(stderr, "%s: option '-W %s' is ambiguous\n",argv[0], d->optarg);\r
+ }\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ d->optind++;\r
+ return '?';\r
+ }\r
+ if (pfound != NULL)\r
+ {\r
+ option_index = indfound;\r
+ if (*nameend)\r
+ {\r
+ if (pfound->has_arg)\r
+ d->optarg = nameend + 1;\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fprintf(stderr, "%s: option '-W %s' doesn't allow an argument\n",argv[0], pfound->name);\r
+ }\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ return '?';\r
+ }\r
+ }\r
+ else if (pfound->has_arg == 1)\r
+ {\r
+ if (d->optind < argc)\r
+ d->optarg = argv[d->optind++];\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fprintf(stderr, "%s: option '-W %s' requires an argument\n",argv[0], pfound->name);\r
+ }\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ return optstring[0] == ':' ? ':' : '?';\r
+ }\r
+ }\r
+ else\r
+ d->optarg = NULL;\r
+ d->__nextchar += strlen(d->__nextchar);\r
+ if (longind != NULL)\r
+ *longind = option_index;\r
+ if (pfound->flag)\r
+ {\r
+ *(pfound->flag) = pfound->val;\r
+ return 0;\r
+ }\r
+ return pfound->val;\r
+ }\r
+no_longs:\r
+ d->__nextchar = NULL;\r
+ return 'W';\r
+ }\r
+ // Option takes an argument: one ':' means required, "::" means optional.\r
+ if (temp[1] == ':')\r
+ {\r
+ if (temp[2] == ':')\r
+ {\r
+ if (*d->__nextchar != '\0')\r
+ {\r
+ d->optarg = d->__nextchar;\r
+ d->optind++;\r
+ }\r
+ else\r
+ d->optarg = NULL;\r
+ d->__nextchar = NULL;\r
+ }\r
+ else\r
+ {\r
+ if (*d->__nextchar != '\0')\r
+ {\r
+ d->optarg = d->__nextchar;\r
+ d->optind++;\r
+ }\r
+ else if (d->optind == argc)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fprintf(stderr,"%s: option requires an argument -- '%c'\n",argv[0], c);\r
+ }\r
+ d->optopt = c;\r
+ if (optstring[0] == ':')\r
+ c = ':';\r
+ else\r
+ c = '?';\r
+ }\r
+ else\r
+ d->optarg = argv[d->optind++];\r
+ d->__nextchar = NULL;\r
+ }\r
+ }\r
+ return c;\r
+ }\r
+}\r
+// Non-reentrant front end: copies the public optind/opterr into the static\r
+// state, runs the reentrant core, then publishes optind, optarg_a and\r
+// optopt back to the globals. Not thread-safe by design.\r
+int _getopt_internal_a (int argc, char *const *argv, const char *optstring, const struct option_a *longopts, int *longind, int long_only, int posixly_correct)\r
+{\r
+ int result;\r
+ getopt_data_a.optind = optind;\r
+ getopt_data_a.opterr = opterr;\r
+ result = _getopt_internal_r_a (argc, argv, optstring, longopts,longind, long_only, &getopt_data_a,posixly_correct);\r
+ optind = getopt_data_a.optind;\r
+ optarg_a = getopt_data_a.optarg;\r
+ optopt = getopt_data_a.optopt;\r
+ return result;\r
+}\r
+// POSIX-style getopt(), char version: short options only.\r
+int getopt_a (int argc, char *const *argv, const char *optstring) _GETOPT_THROW\r
+{\r
+ return _getopt_internal_a (argc, argv, optstring, (const struct option_a *) 0, (int *) 0, 0, 0);\r
+}\r
+// getopt_long(), char version: also accepts "--name[=arg]" long options.\r
+int getopt_long_a (int argc, char *const *argv, const char *options, const struct option_a *long_options, int *opt_index) _GETOPT_THROW\r
+{\r
+ return _getopt_internal_a (argc, argv, options, long_options, opt_index, 0, 0);\r
+}\r
+// Like getopt_long_a, but a single '-' may also introduce a long option.\r
+int getopt_long_only_a (int argc, char *const *argv, const char *options, const struct option_a *long_options, int *opt_index) _GETOPT_THROW\r
+{\r
+ return _getopt_internal_a (argc, argv, options, long_options, opt_index, 1, 0);\r
+}\r
+// Reentrant getopt_long: the caller supplies and owns the scanner state.\r
+int _getopt_long_r_a (int argc, char *const *argv, const char *options, const struct option_a *long_options, int *opt_index, struct _getopt_data_a *d)\r
+{\r
+ return _getopt_internal_r_a (argc, argv, options, long_options, opt_index,0, d, 0);\r
+}\r
+// Reentrant getopt_long_only: caller-supplied state, '-' starts long opts.\r
+int _getopt_long_only_r_a (int argc, char *const *argv, const char *options, const struct option_a *long_options, int *opt_index, struct _getopt_data_a *d)\r
+{\r
+ return _getopt_internal_r_a (argc, argv, options, long_options, opt_index, 1, d, 0);\r
+}\r
+\r
+//\r
+//\r
+// Unicode Structures and Functions\r
+//\r
+//\r
+\r
+// Per-scan parser state, wide-character (wchar_t) twin of _getopt_data_a;\r
+// one static instance backs the non-reentrant wide entry points.\r
+static struct _getopt_data_w\r
+{\r
+ int optind; /* index of next argv element to scan */\r
+ int opterr; /* nonzero => print diagnostics */\r
+ int optopt; /* option char that caused the last error */\r
+ wchar_t *optarg; /* argument of the last option parsed */\r
+ int __initialized; /* set once _getopt_initialize_w has run */\r
+ wchar_t *__nextchar; /* next short-option char within current element */\r
+ enum ENUM_ORDERING __ordering;\r
+ int __posixly_correct;\r
+ int __first_nonopt; /* bounds of the skipped non-option block */\r
+ int __last_nonopt;\r
+} getopt_data_w;\r
+// Public mirror of getopt_data_w.optarg for the wide API.\r
+wchar_t *optarg_w;\r
+\r
+// Wide-character twin of exchange_a: rotates the skipped non-option block\r
+// [__first_nonopt, __last_nonopt) past the options scanned after it, so\r
+// all non-options end up at the end of argv under PERMUTE ordering.\r
+static void exchange_w(wchar_t **argv, struct _getopt_data_w *d)\r
+{\r
+ int bottom = d->__first_nonopt;\r
+ int middle = d->__last_nonopt;\r
+ int top = d->optind;\r
+ wchar_t *tem;\r
+ while (top > middle && middle > bottom)\r
+ {\r
+ if (top - middle > middle - bottom)\r
+ {\r
+ // Option segment is longer: move the non-options up into its tail.\r
+ int len = middle - bottom;\r
+ register int i;\r
+ for (i = 0; i < len; i++)\r
+ {\r
+ tem = argv[bottom + i];\r
+ argv[bottom + i] = argv[top - (middle - bottom) + i];\r
+ argv[top - (middle - bottom) + i] = tem;\r
+ }\r
+ top -= len;\r
+ }\r
+ else\r
+ {\r
+ // Non-option segment is longer or equal: move the options down.\r
+ int len = top - middle;\r
+ register int i;\r
+ for (i = 0; i < len; i++)\r
+ {\r
+ tem = argv[bottom + i];\r
+ argv[bottom + i] = argv[middle + i];\r
+ argv[middle + i] = tem;\r
+ }\r
+ bottom += len;\r
+ }\r
+ }\r
+ // The non-option block now sits immediately before optind.\r
+ d->__first_nonopt += (d->optind - d->__last_nonopt);\r
+ d->__last_nonopt = d->optind;\r
+}\r
+// Reset scan state and decide the argument-ordering policy from the\r
+// first character of OPTSTRING: '-' = RETURN_IN_ORDER (non-options are\r
+// returned as value 1), '+' = REQUIRE_ORDER (stop at first non-option),\r
+// otherwise PERMUTE unless POSIX mode is in force. Returns OPTSTRING\r
+// with any leading mode character stripped.\r
+static const wchar_t *_getopt_initialize_w (const wchar_t *optstring, struct _getopt_data_w *d, int posixly_correct)\r
+{\r
+ d->__first_nonopt = d->__last_nonopt = d->optind;\r
+ d->__nextchar = NULL;\r
+ // POSIX mode if requested by the caller OR the POSIXLY_CORRECT\r
+ // environment variable is set (Windows _wgetenv).\r
+ d->__posixly_correct = posixly_correct | !!_wgetenv(L"POSIXLY_CORRECT");\r
+ if (optstring[0] == L'-')\r
+ {\r
+ d->__ordering = RETURN_IN_ORDER;\r
+ ++optstring;\r
+ }\r
+ else if (optstring[0] == L'+')\r
+ {\r
+ d->__ordering = REQUIRE_ORDER;\r
+ ++optstring;\r
+ }\r
+ else if (d->__posixly_correct)\r
+ d->__ordering = REQUIRE_ORDER;\r
+ else\r
+ d->__ordering = PERMUTE;\r
+ return optstring;\r
+}\r
+// Core worker for wide-character option scanning (GNU getopt algorithm,\r
+// reentrant form). Returns the option character found; 1 for a plain\r
+// argument under RETURN_IN_ORDER; 0 when a long option stored to its\r
+// flag; L'?' (or L':' with a leading ':' in OPTSTRING) on errors; and\r
+// -1 when the scan is finished.\r
+int _getopt_internal_r_w (int argc, wchar_t *const *argv, const wchar_t *optstring, const struct option_w *longopts, int *longind, int long_only, struct _getopt_data_w *d, int posixly_correct)\r
+{\r
+ int print_errors = d->opterr;\r
+ if (argc < 1)\r
+ return -1;\r
+ d->optarg = NULL;\r
+ // First call, or the caller reset optind to 0: (re)initialize state.\r
+ if (d->optind == 0 || !d->__initialized)\r
+ {\r
+ if (d->optind == 0)\r
+ d->optind = 1;\r
+ optstring = _getopt_initialize_w (optstring, d, posixly_correct);\r
+ d->__initialized = 1;\r
+ }\r
+ else if (optstring[0] == L'-' || optstring[0] == L'+')\r
+ optstring++;\r
+ // A leading ':' suppresses diagnostics and makes "missing argument"\r
+ // report L':' instead of L'?'.\r
+ if (optstring[0] == L':')\r
+ print_errors = 0;\r
+ // Advance to the next ARGV element when the current bundle is done.\r
+ if (d->__nextchar == NULL || *d->__nextchar == L'\0')\r
+ {\r
+ if (d->__last_nonopt > d->optind)\r
+ d->__last_nonopt = d->optind;\r
+ if (d->__first_nonopt > d->optind)\r
+ d->__first_nonopt = d->optind;\r
+ if (d->__ordering == PERMUTE)\r
+ {\r
+ // Shuffle previously skipped non-options behind the options,\r
+ // then skip over any new run of non-options.\r
+ if (d->__first_nonopt != d->__last_nonopt && d->__last_nonopt != d->optind)\r
+ exchange_w((wchar_t **) argv, d);\r
+ else if (d->__last_nonopt != d->optind)\r
+ d->__first_nonopt = d->optind;\r
+ while (d->optind < argc && (argv[d->optind][0] != L'-' || argv[d->optind][1] == L'\0'))\r
+ d->optind++;\r
+ d->__last_nonopt = d->optind;\r
+ }\r
+ // "--" terminates option scanning; everything after is non-options.\r
+ if (d->optind != argc && !wcscmp(argv[d->optind], L"--"))\r
+ {\r
+ d->optind++;\r
+ if (d->__first_nonopt != d->__last_nonopt && d->__last_nonopt != d->optind)\r
+ exchange_w((wchar_t **) argv, d);\r
+ else if (d->__first_nonopt == d->__last_nonopt)\r
+ d->__first_nonopt = d->optind;\r
+ d->__last_nonopt = argc;\r
+ d->optind = argc;\r
+ }\r
+ // End of ARGV: leave optind at the first non-option and stop.\r
+ if (d->optind == argc)\r
+ {\r
+ if (d->__first_nonopt != d->__last_nonopt)\r
+ d->optind = d->__first_nonopt;\r
+ return -1;\r
+ }\r
+ // A non-option argument: stop (REQUIRE_ORDER) or hand it back as 1.\r
+ if ((argv[d->optind][0] != L'-' || argv[d->optind][1] == L'\0'))\r
+ {\r
+ if (d->__ordering == REQUIRE_ORDER)\r
+ return -1;\r
+ d->optarg = argv[d->optind++];\r
+ return 1;\r
+ }\r
+ // Point past "-" (or "--" when long options are in play).\r
+ d->__nextchar = (argv[d->optind] + 1 + (longopts != NULL && argv[d->optind][1] == L'-'));\r
+ }\r
+ // Long-option decoding: "--name[=arg]", or "-name" under long_only\r
+ // when it cannot be a short option.\r
+ if (longopts != NULL && (argv[d->optind][1] == L'-' || (long_only && (argv[d->optind][2] || !wcschr(optstring, argv[d->optind][1])))))\r
+ {\r
+ wchar_t *nameend;\r
+ unsigned int namelen;\r
+ const struct option_w *p;\r
+ const struct option_w *pfound = NULL;\r
+ struct option_list\r
+ {\r
+ const struct option_w *p;\r
+ struct option_list *next;\r
+ } *ambig_list = NULL;\r
+ int exact = 0;\r
+ int indfound = -1;\r
+ int option_index;\r
+ for (nameend = d->__nextchar; *nameend && *nameend != L'='; nameend++);\r
+ namelen = (unsigned int)(nameend - d->__nextchar);\r
+ // Match abbreviated names; collect ambiguous prefixes (alloca'd\r
+ // list lives only for this call).\r
+ for (p = longopts, option_index = 0; p->name; p++, option_index++)\r
+ if (!wcsncmp(p->name, d->__nextchar, namelen))\r
+ {\r
+ if (namelen == (unsigned int)wcslen(p->name))\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ exact = 1;\r
+ break;\r
+ }\r
+ else if (pfound == NULL)\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ }\r
+ else if (long_only || pfound->has_arg != p->has_arg || pfound->flag != p->flag || pfound->val != p->val)\r
+ {\r
+ struct option_list *newp = (struct option_list*)alloca(sizeof(*newp));\r
+ newp->p = p;\r
+ newp->next = ambig_list;\r
+ ambig_list = newp;\r
+ }\r
+ }\r
+ // Ambiguous abbreviation with no exact match: report and bail.\r
+ if (ambig_list != NULL && !exact)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ struct option_list first;\r
+ first.p = pfound;\r
+ first.next = ambig_list;\r
+ ambig_list = &first;\r
+ fwprintf(stderr, L"%s: option '%s' is ambiguous; possibilities:", argv[0], argv[d->optind]);\r
+ do\r
+ {\r
+ fwprintf (stderr, L" '--%s'", ambig_list->p->name);\r
+ ambig_list = ambig_list->next;\r
+ }\r
+ while (ambig_list != NULL);\r
+ fputwc (L'\n', stderr);\r
+ }\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ d->optind++;\r
+ d->optopt = 0;\r
+ return L'?';\r
+ }\r
+ if (pfound != NULL)\r
+ {\r
+ option_index = indfound;\r
+ d->optind++;\r
+ if (*nameend)\r
+ {\r
+ // "--name=arg" form: take the text after '=' as the argument.\r
+ if (pfound->has_arg)\r
+ d->optarg = nameend + 1;\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ if (argv[d->optind - 1][1] == L'-')\r
+ {\r
+ fwprintf(stderr, L"%s: option '--%s' doesn't allow an argument\n",argv[0], pfound->name);\r
+ }\r
+ else\r
+ {\r
+ fwprintf(stderr, L"%s: option '%c%s' doesn't allow an argument\n",argv[0], argv[d->optind - 1][0],pfound->name);\r
+ }\r
+ }\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ d->optopt = pfound->val;\r
+ return L'?';\r
+ }\r
+ }\r
+ else if (pfound->has_arg == 1)\r
+ {\r
+ // Required argument: consume the next ARGV element.\r
+ if (d->optind < argc)\r
+ d->optarg = argv[d->optind++];\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fwprintf(stderr,L"%s: option '--%s' requires an argument\n",argv[0], pfound->name);\r
+ }\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ d->optopt = pfound->val;\r
+ return optstring[0] == L':' ? L':' : L'?';\r
+ }\r
+ }\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ if (longind != NULL)\r
+ *longind = option_index;\r
+ // With a flag pointer, store val there and return 0; else return val.\r
+ if (pfound->flag)\r
+ {\r
+ *(pfound->flag) = pfound->val;\r
+ return 0;\r
+ }\r
+ return pfound->val;\r
+ }\r
+ // No long match: error out unless long_only lets it fall through\r
+ // to short-option handling below.\r
+ if (!long_only || argv[d->optind][1] == L'-' || wcschr(optstring, *d->__nextchar) == NULL)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ if (argv[d->optind][1] == L'-')\r
+ {\r
+ fwprintf(stderr, L"%s: unrecognized option '--%s'\n",argv[0], d->__nextchar);\r
+ }\r
+ else\r
+ {\r
+ fwprintf(stderr, L"%s: unrecognized option '%c%s'\n",argv[0], argv[d->optind][0], d->__nextchar);\r
+ }\r
+ }\r
+ d->__nextchar = (wchar_t *)L"";\r
+ d->optind++;\r
+ d->optopt = 0;\r
+ return L'?';\r
+ }\r
+ }\r
+ // Short-option decoding: next character of the current "-abc" bundle.\r
+ {\r
+ wchar_t c = *d->__nextchar++;\r
+ wchar_t *temp = (wchar_t*)wcschr(optstring, c);\r
+ if (*d->__nextchar == L'\0')\r
+ ++d->optind;\r
+ if (temp == NULL || c == L':' || c == L';')\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fwprintf(stderr, L"%s: invalid option -- '%c'\n", argv[0], c);\r
+ }\r
+ d->optopt = c;\r
+ return L'?';\r
+ }\r
+ // Special GNU extension: "W;" in OPTSTRING makes "-W foo" behave\r
+ // like "--foo".\r
+ if (temp[0] == L'W' && temp[1] == L';')\r
+ {\r
+ wchar_t *nameend;\r
+ const struct option_w *p;\r
+ const struct option_w *pfound = NULL;\r
+ int exact = 0;\r
+ int ambig = 0;\r
+ int indfound = 0;\r
+ int option_index;\r
+ if (longopts == NULL)\r
+ goto no_longs;\r
+ if (*d->__nextchar != L'\0')\r
+ {\r
+ d->optarg = d->__nextchar;\r
+ d->optind++;\r
+ }\r
+ else if (d->optind == argc)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fwprintf(stderr,L"%s: option requires an argument -- '%c'\n",argv[0], c);\r
+ }\r
+ d->optopt = c;\r
+ if (optstring[0] == L':')\r
+ c = L':';\r
+ else\r
+ c = L'?';\r
+ return c;\r
+ }\r
+ else\r
+ d->optarg = argv[d->optind++];\r
+ // Look up the "-W" argument in the long-option table.\r
+ for (d->__nextchar = nameend = d->optarg; *nameend && *nameend != L'='; nameend++);\r
+ for (p = longopts, option_index = 0; p->name; p++, option_index++)\r
+ if (!wcsncmp(p->name, d->__nextchar, nameend - d->__nextchar))\r
+ {\r
+ if ((unsigned int) (nameend - d->__nextchar) == wcslen(p->name))\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ exact = 1;\r
+ break;\r
+ }\r
+ else if (pfound == NULL)\r
+ {\r
+ pfound = p;\r
+ indfound = option_index;\r
+ }\r
+ else if (long_only || pfound->has_arg != p->has_arg || pfound->flag != p->flag || pfound->val != p->val)\r
+ ambig = 1;\r
+ }\r
+ if (ambig && !exact)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fwprintf(stderr, L"%s: option '-W %s' is ambiguous\n",argv[0], d->optarg);\r
+ }\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ d->optind++;\r
+ return L'?';\r
+ }\r
+ if (pfound != NULL)\r
+ {\r
+ option_index = indfound;\r
+ if (*nameend)\r
+ {\r
+ if (pfound->has_arg)\r
+ d->optarg = nameend + 1;\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fwprintf(stderr, L"%s: option '-W %s' doesn't allow an argument\n",argv[0], pfound->name);\r
+ }\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ return L'?';\r
+ }\r
+ }\r
+ else if (pfound->has_arg == 1)\r
+ {\r
+ if (d->optind < argc)\r
+ d->optarg = argv[d->optind++];\r
+ else\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fwprintf(stderr, L"%s: option '-W %s' requires an argument\n",argv[0], pfound->name);\r
+ }\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ return optstring[0] == L':' ? L':' : L'?';\r
+ }\r
+ }\r
+ else\r
+ d->optarg = NULL;\r
+ d->__nextchar += wcslen(d->__nextchar);\r
+ if (longind != NULL)\r
+ *longind = option_index;\r
+ if (pfound->flag)\r
+ {\r
+ *(pfound->flag) = pfound->val;\r
+ return 0;\r
+ }\r
+ return pfound->val;\r
+ }\r
+no_longs:\r
+ d->__nextchar = NULL;\r
+ return L'W';\r
+ }\r
+ // ':' after the option character: it takes an argument\r
+ // ("::" = optional, ":" = required).\r
+ if (temp[1] == L':')\r
+ {\r
+ if (temp[2] == L':')\r
+ {\r
+ // Optional argument must be attached ("-ofoo"), never separate.\r
+ if (*d->__nextchar != L'\0')\r
+ {\r
+ d->optarg = d->__nextchar;\r
+ d->optind++;\r
+ }\r
+ else\r
+ d->optarg = NULL;\r
+ d->__nextchar = NULL;\r
+ }\r
+ else\r
+ {\r
+ // Required argument: attached text or the next ARGV element.\r
+ if (*d->__nextchar != L'\0')\r
+ {\r
+ d->optarg = d->__nextchar;\r
+ d->optind++;\r
+ }\r
+ else if (d->optind == argc)\r
+ {\r
+ if (print_errors)\r
+ {\r
+ fwprintf(stderr,L"%s: option requires an argument -- '%c'\n",argv[0], c);\r
+ }\r
+ d->optopt = c;\r
+ if (optstring[0] == L':')\r
+ c = L':';\r
+ else\r
+ c = L'?';\r
+ }\r
+ else\r
+ d->optarg = argv[d->optind++];\r
+ d->__nextchar = NULL;\r
+ }\r
+ }\r
+ return c;\r
+ }\r
+}\r
+// Non-reentrant wide entry point: shuttles the shared globals\r
+// (optind/opterr/optopt/optarg_w) in and out of the static state block.\r
+int _getopt_internal_w (int argc, wchar_t *const *argv, const wchar_t *optstring, const struct option_w *longopts, int *longind, int long_only, int posixly_correct)\r
+{\r
+ int result;\r
+ getopt_data_w.optind = optind;\r
+ getopt_data_w.opterr = opterr;\r
+ result = _getopt_internal_r_w (argc, argv, optstring, longopts,longind, long_only, &getopt_data_w,posixly_correct);\r
+ optind = getopt_data_w.optind;\r
+ optarg_w = getopt_data_w.optarg;\r
+ optopt = getopt_data_w.optopt;\r
+ return result;\r
+}\r
+// getopt_w: wide getopt(3) — short options only.\r
+int getopt_w (int argc, wchar_t *const *argv, const wchar_t *optstring) _GETOPT_THROW\r
+{\r
+ return _getopt_internal_w (argc, argv, optstring, (const struct option_w *) 0, (int *) 0, 0, 0);\r
+}\r
+// getopt_long_w: wide getopt_long(3) — "--name" style long options.\r
+int getopt_long_w (int argc, wchar_t *const *argv, const wchar_t *options, const struct option_w *long_options, int *opt_index) _GETOPT_THROW\r
+{\r
+ return _getopt_internal_w (argc, argv, options, long_options, opt_index, 0, 0);\r
+}\r
+// getopt_long_only_w: single '-' may also introduce a long option.\r
+int getopt_long_only_w (int argc, wchar_t *const *argv, const wchar_t *options, const struct option_w *long_options, int *opt_index) _GETOPT_THROW\r
+{\r
+ return _getopt_internal_w (argc, argv, options, long_options, opt_index, 1, 0);\r
+}\r
+// Reentrant variants: caller supplies its own _getopt_data_w state.\r
+int _getopt_long_r_w (int argc, wchar_t *const *argv, const wchar_t *options, const struct option_w *long_options, int *opt_index, struct _getopt_data_w *d)\r
+{\r
+ return _getopt_internal_r_w (argc, argv, options, long_options, opt_index,0, d, 0);\r
+}\r
+int _getopt_long_only_r_w (int argc, wchar_t *const *argv, const wchar_t *options, const struct option_w *long_options, int *opt_index, struct _getopt_data_w *d)\r
+{\r
+ return _getopt_internal_r_w (argc, argv, options, long_options, opt_index, 1, d, 0);\r
+}
\ No newline at end of file
--- /dev/null
+// Public header for the Windows getopt port: ANSI (_a) and wide (_w)\r
+// variants of getopt/getopt_long/getopt_long_only, with TCHAR-style\r
+// mapping via _UNICODE at the bottom.\r
+#ifndef __WINGETOPT_H_\r
+ #define __WINGETOPT_H_\r
+\r
+ #ifdef _GETOPT_API\r
+ #undef _GETOPT_API\r
+ #endif\r
+\r
+ // Select linkage: static lib, DLL export (building), or DLL import.\r
+ #if defined(EXPORTS_GETOPT) && defined(STATIC_GETOPT)\r
+ #error "The preprocessor definitions of EXPORTS_GETOPT and STATIC_GETOPT can only be used individually"\r
+ #elif defined(STATIC_GETOPT)\r
+ #define _GETOPT_API\r
+ #elif defined(EXPORTS_GETOPT)\r
+ #define _GETOPT_API __declspec(dllexport)\r
+ #else\r
+ #define _GETOPT_API __declspec(dllimport)\r
+ #endif\r
+\r
+ // Change behavior for C/C++: extern "C" wrappers and throw() specs\r
+ // only make sense to a C++ compiler.\r
+ #ifdef __cplusplus\r
+ #define _BEGIN_EXTERN_C extern "C" {\r
+ #define _END_EXTERN_C }\r
+ #define _GETOPT_THROW throw()\r
+ #else\r
+ #define _BEGIN_EXTERN_C\r
+ #define _END_EXTERN_C\r
+ #define _GETOPT_THROW\r
+ #endif\r
+\r
+ // Standard GNU options\r
+ #define null_argument 0 /*Argument Null*/\r
+ #define no_argument 0 /*Argument Switch Only*/\r
+ #define required_argument 1 /*Argument Required*/\r
+ #define optional_argument 2 /*Argument Optional*/\r
+\r
+\r
+ // Shorter Options\r
+ #define ARG_NULL 0 /*Argument Null*/\r
+ #define ARG_NONE 0 /*Argument Switch Only*/\r
+ #define ARG_REQ 1 /*Argument Required*/\r
+ #define ARG_OPT 2 /*Argument Optional*/\r
+\r
+ #include <string.h>\r
+ #include <wchar.h>\r
+\r
+_BEGIN_EXTERN_C\r
+\r
+ // Shared scanner globals (see getopt(3)).\r
+ extern _GETOPT_API int optind;\r
+ extern _GETOPT_API int opterr;\r
+ extern _GETOPT_API int optopt;\r
+\r
+ // Ansi\r
+ struct option_a\r
+ {\r
+ const char* name;\r
+ int has_arg;\r
+ int *flag;\r
+ int val;\r
+ };\r
+ extern _GETOPT_API char *optarg_a;\r
+ extern _GETOPT_API int getopt_a(int argc, char *const *argv, const char *optstring) _GETOPT_THROW;\r
+ extern _GETOPT_API int getopt_long_a(int argc, char *const *argv, const char *options, const struct option_a *long_options, int *opt_index) _GETOPT_THROW;\r
+ extern _GETOPT_API int getopt_long_only_a(int argc, char *const *argv, const char *options, const struct option_a *long_options, int *opt_index) _GETOPT_THROW;\r
+\r
+ // Unicode\r
+ struct option_w\r
+ {\r
+ const wchar_t* name;\r
+ int has_arg;\r
+ int *flag;\r
+ int val;\r
+ };\r
+ extern _GETOPT_API wchar_t *optarg_w;\r
+ extern _GETOPT_API int getopt_w(int argc, wchar_t *const *argv, const wchar_t *optstring) _GETOPT_THROW;\r
+ extern _GETOPT_API int getopt_long_w(int argc, wchar_t *const *argv, const wchar_t *options, const struct option_w *long_options, int *opt_index) _GETOPT_THROW;\r
+ extern _GETOPT_API int getopt_long_only_w(int argc, wchar_t *const *argv, const wchar_t *options, const struct option_w *long_options, int *opt_index) _GETOPT_THROW;\r
+\r
+_END_EXTERN_C\r
+\r
+ #undef _BEGIN_EXTERN_C\r
+ #undef _END_EXTERN_C\r
+ #undef _GETOPT_THROW\r
+ #undef _GETOPT_API\r
+\r
+ // Map the unadorned names onto the variant matching the TCHAR mode.\r
+ #ifdef _UNICODE\r
+ #define getopt getopt_w\r
+ #define getopt_long getopt_long_w\r
+ #define getopt_long_only getopt_long_only_w\r
+ #define option option_w\r
+ #define optarg optarg_w\r
+ #else\r
+ #define getopt getopt_a\r
+ #define getopt_long getopt_long_a\r
+ #define getopt_long_only getopt_long_only_a\r
+ #define option option_a\r
+ #define optarg optarg_a\r
+ #endif\r
+#endif // __WINGETOPT_H_
\ No newline at end of file
--- /dev/null
+AM_CXXFLAGS = -pthread
+
+# Subdirectories to recurse into; optional ones are appended below
+# based on the conditionals defined in configure.ac.
+SUBDIRS =
+
+SUBDIRS += src
+
+if ENABLE_TOOLS
+SUBDIRS += tools
+endif
+
+if USE_CYTHON
+SUBDIRS += python
+endif
+
+if ENABLE_TESTS
+SUBDIRS += tests
+endif
+
+if HAVE_DOXYGEN
+SUBDIRS += doc
+endif
+
+# Local m4 macros used by autoreconf.
+ACLOCAL_AMFLAGS = -I m4
+
+DOC_FILES = \
+	README.md \
+	COPYING
+
+# Ship the docs in the distribution tarball.
+EXTRA_DIST = \
+	$(DOC_FILES)
+
+# Install the pkg-config file next to the library.
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = opendht.pc
--- /dev/null
+<img src="https://raw.githubusercontent.com/savoirfairelinux/opendht/master/resources/opendht_logo_512.png" width="100" align="right">
+<br>
+<h1 style="margin-top:10px">
+ <a id="user-content-opendht-" class="anchor" href="/savoirfairelinux/opendht/blob/master/README.md#opendht-" aria-hidden="true"></a>OpenDHT
+</h1>
+
+A lightweight C++11 Distributed Hash Table implementation.
+
+OpenDHT provides an easy to use distributed in-memory data store.
+Every node in the network can read and write values to the store.
+Values are distributed over the network, with redundancy.
+
+ * Lightweight and scalable, designed for large networks and small devices
+ * High resilience to network disruption
+ * Public key cryptography layer providing optional data signature and encryption (using GnuTLS)
+ * IPv4 and IPv6 support
+ * Clean and powerful C++11 map API
+ * Python 3 bindings
+ * REST API
+
+## Documentation
+See the wiki: <https://github.com/savoirfairelinux/opendht/wiki>
+
+#### How-to build and install
+
+Build instructions: <https://github.com/savoirfairelinux/opendht/wiki/Build-the-library>
+
+#### How-to build a simple client app
+```bash
+g++ main.cpp -std=c++11 -lopendht -lgnutls
+```
+
+## Examples
+### C++ example
+The `tools` directory includes simple example programs :
+* `dhtnode`, a command-line tool, mostly used for debugging, that allows one to perform operations supported by the library (get, put, etc.) with text values.
+* `dhtchat`, a very simple IM client working over the dht.
+
+Example program launching a DHT node, connecting to the network and performing some basic operations:
+```c++
+#include <opendht.h>
+#include <vector>
+
+int main()
+{
+ dht::DhtRunner node;
+
+ // Launch a dht node on a new thread, using a
+ // generated RSA key pair, and listen on port 4222.
+ node.run(4222, dht::crypto::generateIdentity(), true);
+
+ // Join the network through any running node,
+ // here using a known bootstrap node.
+ node.bootstrap("bootstrap.ring.cx", "4222");
+
+ // put some data on the dht
+ std::vector<uint8_t> some_data(5, 10);
+ node.put("unique_key", some_data);
+
+ // put some data on the dht, signed with our generated private key
+ node.putSigned("unique_key_42", some_data);
+
+ // get data from the dht
+ node.get("other_unique_key", [](const std::vector<std::shared_ptr<dht::Value>>& values) {
+ // Callback called when values are found
+ for (const auto& value : values)
+ std::cout << "Found value: " << *value << std::endl;
+ return true; // return false to stop the search
+ });
+
+ // wait for dht threads to end
+ node.join();
+ return 0;
+}
+```
+### Python 3 example
+```python
+import opendht as dht
+
+node = dht.DhtRunner()
+node.run()
+
+# Join the network through any running node,
+# here using a known bootstrap node.
+node.bootstrap("bootstrap.ring.cx", "4222")
+
+# blocking call (provide callback arguments to make the call non-blocking)
+node.put(dht.InfoHash.get("unique_key"), dht.Value(b'some binary data'))
+
+results = node.get(dht.InfoHash.get("unique_key"))
+for r in results:
+ print(r)
+```
+
+## Dependencies
+- msgpack-c 1.2+, used for data serialization.
+- GnuTLS 3.3+, used for cryptographic operations.
+- Nettle 2.4+, a GnuTLS dependency for crypto.
+- (optional) restbed used for the REST API. commit fb84213e170bc171fecd825a8e47ed9f881a12cd (https://github.com/AmarOk1412/restbed/tree/async_read_until)
+- (optional) jsoncpp 1.7.4-3+, used for the REST API.
+- Build tested with GCC 5.2+ (GNU/Linux, Windows with MinGW), Clang/LLVM (GNU/Linux, Android, macOS, iOS).
+- Build tested with Microsoft Visual Studio 2015
+
+## Contact
+
+IRC: join us on Freenode at [`#opendht`](https://webchat.freenode.net/?channels=%23opendht).
+
+## License
+Copyright (C) 2014-2018 Savoir-faire Linux Inc.
+
+OpenDHT is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+See COPYING or https://www.gnu.org/licenses/gpl-3.0.en.html for the full GPLv3 license.
+
+## Acknowledgements
+This project was originally based on https://github.com/jech/dht by Juliusz Chroboczek.
+It is independent from another project called OpenDHT (Sean Rhea. Ph.D. Thesis, 2005), now extinct.
+
+## Donations
+We gratefully accept Bitcoin donations to support OpenDHT development at: `bitcoin:3EykSd1An888efq4Bq3KaV3hJ3JQ4FPnwm`.
--- /dev/null
+git submodule update --init
+autoreconf --install --verbose -Wall
--- /dev/null
+# Locate the CppUnit test framework.
+# Defines CPPUNIT_INCLUDE_DIRS and CPPUNIT_LIBRARIES on success.
+include(FindPkgConfig)
+pkg_check_modules(PC_CPPUNIT "cppunit")
+
+# Headers: prefer pkg-config hints, then the install prefix, then
+# conventional system locations.
+find_path(CPPUNIT_INCLUDE_DIRS
+    NAMES cppunit/TestCase.h
+    HINTS ${PC_CPPUNIT_INCLUDE_DIR}
+          ${CMAKE_INSTALL_PREFIX}/include
+    PATHS /usr/local/include
+          /usr/include
+)
+
+# Library: same search order, plus a lib/ sibling of the include dir.
+find_library(CPPUNIT_LIBRARIES
+    NAMES cppunit
+    HINTS ${PC_CPPUNIT_LIBDIR}
+          ${CMAKE_INSTALL_PREFIX}/lib
+          ${CMAKE_INSTALL_PREFIX}/lib64
+    PATHS ${CPPUNIT_INCLUDE_DIRS}/../lib
+          /usr/local/lib
+          /usr/lib
+)
+
+# CppUnit pulls in the dynamic loader on some platforms.
+list(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS})
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
+mark_as_advanced(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
--- /dev/null
+# - Try to find msgpack
+# Once done this will define
+#  MSGPACK_FOUND - System has msgpack
+#  MSGPACK_INCLUDE_DIRS - The msgpack include directories
+#  MSGPACK_LIBRARIES - The libraries needed to use msgpack
+
+if(NOT MSGPACK_USE_BUNDLED)
+  find_package(PkgConfig)
+  if(PKG_CONFIG_FOUND)
+    # Only append ">=version" when find_package() was actually given a
+    # version; otherwise the old query expanded to the malformed
+    # "msgpackc>=" and the pkg-config search silently failed.
+    if(Msgpack_FIND_VERSION)
+      pkg_search_module(PC_MSGPACK QUIET
+                        msgpackc>=${Msgpack_FIND_VERSION}
+                        msgpack>=${Msgpack_FIND_VERSION})
+    else()
+      pkg_search_module(PC_MSGPACK QUIET msgpackc msgpack)
+    endif()
+  endif()
+else()
+  # Bundled mode: clear pkg-config hints and restrict find_* to HINTS only.
+  set(PC_MSGPACK_INCLUDEDIR)
+  set(PC_MSGPACK_INCLUDE_DIRS)
+  set(PC_MSGPACK_LIBDIR)
+  set(PC_MSGPACK_LIBRARY_DIRS)
+  set(LIMIT_SEARCH NO_DEFAULT_PATH)
+endif()
+
+set(MSGPACK_DEFINITIONS ${PC_MSGPACK_CFLAGS_OTHER})
+
+find_path(MSGPACK_INCLUDE_DIR msgpack/version_master.h
+          HINTS ${PC_MSGPACK_INCLUDEDIR} ${PC_MSGPACK_INCLUDE_DIRS}
+          ${LIMIT_SEARCH})
+
+# Extract the version from version_master.h for FPHSA's VERSION_VAR.
+if(MSGPACK_INCLUDE_DIR)
+  file(READ ${MSGPACK_INCLUDE_DIR}/msgpack/version_master.h msgpack_version_h)
+  string(REGEX REPLACE ".*MSGPACK_VERSION_MAJOR +([0-9]+).*" "\\1" MSGPACK_VERSION_MAJOR "${msgpack_version_h}")
+  string(REGEX REPLACE ".*MSGPACK_VERSION_MINOR +([0-9]+).*" "\\1" MSGPACK_VERSION_MINOR "${msgpack_version_h}")
+  string(REGEX REPLACE ".*MSGPACK_VERSION_REVISION +([0-9]+).*" "\\1" MSGPACK_VERSION_REVISION "${msgpack_version_h}")
+  set(MSGPACK_VERSION_STRING "${MSGPACK_VERSION_MAJOR}.${MSGPACK_VERSION_MINOR}.${MSGPACK_VERSION_REVISION}")
+else()
+  set(MSGPACK_VERSION_STRING)
+endif()
+
+# If we're asked to use static linkage, add libmsgpack{,c}.a as a preferred library name.
+if(MSGPACK_USE_STATIC)
+  list(APPEND MSGPACK_NAMES
+       "${CMAKE_STATIC_LIBRARY_PREFIX}msgpackc${CMAKE_STATIC_LIBRARY_SUFFIX}"
+       "${CMAKE_STATIC_LIBRARY_PREFIX}msgpack${CMAKE_STATIC_LIBRARY_SUFFIX}")
+endif()
+
+list(APPEND MSGPACK_NAMES msgpackc msgpack)
+
+find_library(MSGPACK_LIBRARY NAMES ${MSGPACK_NAMES}
+             # Check each directory for all names to avoid using headers/libraries from
+             # different places.
+             NAMES_PER_DIR
+             HINTS ${PC_MSGPACK_LIBDIR} ${PC_MSGPACK_LIBRARY_DIRS}
+             ${LIMIT_SEARCH})
+
+mark_as_advanced(MSGPACK_INCLUDE_DIR MSGPACK_LIBRARY)
+
+set(MSGPACK_LIBRARIES ${MSGPACK_LIBRARY})
+set(MSGPACK_INCLUDE_DIRS ${MSGPACK_INCLUDE_DIR})
+
+include(FindPackageHandleStandardArgs)
+# handle the QUIETLY and REQUIRED arguments and set MSGPACK_FOUND to TRUE
+# if all listed variables are TRUE
+find_package_handle_standard_args(Msgpack
+                                  REQUIRED_VARS MSGPACK_LIBRARY MSGPACK_INCLUDE_DIR
+                                  VERSION_VAR MSGPACK_VERSION_STRING)
--- /dev/null
+# - Try to find readline, a library for easy editing of command lines.
+# Variables used by this module:
+#  READLINE_ROOT_DIR     - Readline root directory
+# Variables defined by this module:
+#  READLINE_FOUND        - system has Readline
+#  READLINE_INCLUDE_DIR  - the Readline include directory (cached)
+#  READLINE_INCLUDE_DIRS - the Readline include directories
+#                          (identical to READLINE_INCLUDE_DIR)
+#  READLINE_LIBRARY      - the Readline library (cached)
+#  READLINE_LIBRARIES    - the Readline library plus the libraries it
+#                          depends on
+
+# Copyright (C) 2009
+# ASTRON (Netherlands Institute for Radio Astronomy)
+# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
+#
+# This program is free software; you can redistribute it and/or modify
+# modify it under the terms of the GNU General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# $Id: FindReadline.cmake 15228 2010-03-16 09:27:26Z loose $
+
+if(NOT READLINE_FOUND)
+
+  find_path(READLINE_INCLUDE_DIR readline/readline.h
+    HINTS ${READLINE_ROOT_DIR} PATH_SUFFIXES include)
+  find_library(READLINE_LIBRARY readline
+    HINTS ${READLINE_ROOT_DIR} PATH_SUFFIXES lib)
+  find_library(NCURSES_LIBRARY ncurses)  # readline depends on libncurses
+  mark_as_advanced(READLINE_INCLUDE_DIR READLINE_LIBRARY NCURSES_LIBRARY)
+
+  include(FindPackageHandleStandardArgs)
+  find_package_handle_standard_args(Readline DEFAULT_MSG
+    READLINE_LIBRARY NCURSES_LIBRARY READLINE_INCLUDE_DIR)
+
+  set(READLINE_INCLUDE_DIRS ${READLINE_INCLUDE_DIR})
+  set(READLINE_LIBRARIES ${READLINE_LIBRARY} ${NCURSES_LIBRARY})
+
+# Bare endif(): repeating the condition is legacy noise and a
+# maintenance hazard if the guard expression ever changes.
+endif()
--- /dev/null
+# Locate the restbed HTTP framework (headers + library).
+if(NOT Restbed_FOUND)
+  find_path(Restbed_INCLUDE_DIR restbed
+            HINTS
+            "/usr/include"
+            "/usr/local/include"
+            "/opt/local/include")
+  find_library(Restbed_LIBRARY restbed
+               HINTS ${Restbed_ROOT_DIR} PATH_SUFFIXES lib)
+  include(FindPackageHandleStandardArgs)
+  # FPHSA sets Restbed_FOUND from BOTH required vars; the previous code
+  # then overrode it to TRUE whenever the include dir alone was found,
+  # reporting success even when the library was missing.
+  find_package_handle_standard_args(Restbed DEFAULT_MSG Restbed_LIBRARY Restbed_INCLUDE_DIR)
+  if(Restbed_FOUND)
+    set(Restbed_LIBRARIES ${Restbed_LIBRARY})
+    set(Restbed_INCLUDE_DIRS ${Restbed_INCLUDE_DIR})
+  endif()
+endif()
--- /dev/null
+dnl define macros
+dnl Version is assembled from three m4 macros so it can also be
+dnl exported piecewise via AC_SUBST below.
+m4_define([opendht_major_version], 1)
+m4_define([opendht_minor_version], 8)
+m4_define([opendht_patch_version], 1)
+m4_define([opendht_version],
+	  [opendht_major_version.opendht_minor_version.opendht_patch_version])
+
+AC_INIT(opendht, [opendht_version])
+dnl Keep autotools helper scripts out of the project root.
+AC_CONFIG_AUX_DIR(ac)
+AM_INIT_AUTOMAKE([foreign subdir-objects])
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_MACRO_DIR([m4])
+AC_CANONICAL_HOST
+
+dnl Expose the version components to Makefiles and generated files.
+AC_SUBST(OPENDHT_MAJOR_VERSION, opendht_major_version)
+AC_SUBST(OPENDHT_MINOR_VERSION, opendht_minor_version)
+AC_SUBST(OPENDHT_PATCH_VERSION, opendht_patch_version)
+
+dnl Debug builds: no optimization, extra warnings; release: -O3.
+AC_ARG_ENABLE([debug], AS_HELP_STRING([--enable-debug], [Build in debug mode, adds stricter warnings, disables optimization]))
+AS_IF([test "x$enable_debug" = "xyes"],
+      [CXXFLAGS="${CXXFLAGS} -g -Wno-return-type -Wall -Wextra -Wnon-virtual-dtor -O0 -pedantic-errors"],
+      [CXXFLAGS="${CXXFLAGS} -O3 -pedantic-errors"])
+
+AC_PROG_CXX
+AM_PROG_AR
+
+dnl Check for logs
+AC_ARG_ENABLE([logs], [AS_HELP_STRING([--disable-logs], [Disable DHT logs])])
+AS_IF([test "x$enable_logs" != "xno"], [
+	AC_DEFINE([OPENDHT_LOG], [true], [Define if DHT logs are enabled])
+], [
+	AC_DEFINE([OPENDHT_LOG], [false], [Define if DHT logs are enabled])
+])
+
+dnl Check for indexation
+AC_ARG_ENABLE([indexation], [AS_HELP_STRING([--disable-indexation], [Disable DHT indexation])])
+AM_CONDITIONAL(ENABLE_INDEXATION, test x$enable_indexation != "xno")
+AS_IF([test "x$enable_indexation" != "xno"], [
+	AC_DEFINE([OPENDHT_INDEXATION], [1], [Define if DHT indexation is enabled])
+])
+
+dnl Check for Doxygen
+AC_ARG_ENABLE([doc], AS_HELP_STRING([--enable-doc], [Enable documentation generation (doxygen)]))
+AS_IF([test "x$enable_doc" = "xyes"], [
+	AC_CHECK_PROGS([DOXYGEN], [doxygen])
+	AS_IF([test -z "$DOXYGEN"], [AC_MSG_WARN([Doxygen not found - continuing without Doxygen support])])
+])
+AM_CONDITIONAL([HAVE_DOXYGEN], [test -n "$DOXYGEN"])
+
+dnl Check for Python (fixed help-string typo: "Disble" -> "Disable")
+AC_ARG_ENABLE([python], [AS_HELP_STRING([--disable-python], [Disable python binding])])
+AS_IF([test "x$enable_python" != "xno"], [
+	AM_PATH_PYTHON([3.3],, [:])
+	AS_IF([test -n "$PYTHON"],[
+		echo 'import Cython' | $PYTHON
+		dnl "-eq" instead of the bashism "==": configure may run under strict sh.
+		AS_IF([test $? -eq 0],[CYTHON=yes],[AC_MSG_WARN([Cython not found - continuing without python support])])
+		AC_CHECK_PROGS([PIP], [pip3])
+		AS_IF([test -z "$PIP"],[AC_MSG_WARN([pip not found - continuing without python uninstall support])])
+	])
+	])
+AM_CONDITIONAL([USE_CYTHON], [test -n "$CYTHON"])
+AM_CONDITIONAL([HAVE_PIP], [test -n "$PIP"])
+
+dnl Per-platform setup based on the canonical host triple.
+case "${host_os}" in
+	"")
+		SYS=unknown
+		;;
+	*android*)
+		SYS=android
+		;;
+	linux*)
+		SYS=linux
+		;;
+	darwin*)
+		SYS=darwin
+		;;
+	mingw32*)
+		SYS=mingw32
+		WIN32=1
+		AC_DEFINE([_POSIX_SOURCE], [1], [IEEE Std 1003.1.])
+		AC_DEFINE([_POSIX_C_SOURCE], [200809L], [IEEE Std 1003.1.])
+		AC_DEFINE([_XOPEN_SOURCE], [700], [POSIX and XPG 7th edition])
+		AC_DEFINE([_XOPEN_SOURCE_EXTENDED], [1], [XPG things and X/Open Unix extensions.])
+		AC_DEFINE([_BSD_SOURCE], [1], [ISO C, POSIX, and 4.3BSD things.])
+		LDFLAGS="${LDFLAGS} -lws2_32"
+		AC_SUBST(WINDOWS_ARCH)
+		AC_SUBST(PROGRAMFILES)
+		;;
+	*)
+		SYS="${host_os}"
+		;;
+esac
+
+dnl Android's libc bundles pthreads; everywhere else link it explicitly.
+AM_CONDITIONAL(WIN32, [test "x$SYS" = "xmingw32"])
+AS_IF([test "x$SYS" = "xandroid"],
+      [], [LDFLAGS="${LDFLAGS} -lpthread"])
+
+LT_INIT()
+LT_LANG(C++)
+
+dnl Require C++11 without GNU extensions.
+AX_CXX_COMPILE_STDCXX(11,[noext],[mandatory])
+
+PKG_PROG_PKG_CONFIG()
+
+dnl Optional proxy/push features. All tests use the POSIX "=" form:
+dnl "==" is a bashism and fails on strict /bin/sh implementations.
+AC_ARG_ENABLE([proxy_server], AS_HELP_STRING([--enable-proxy-server], [Enable proxy server ability]), proxy_server=yes, proxy_server=no)
+AM_CONDITIONAL(ENABLE_PROXY_SERVER, [test "x$proxy_server" = xyes])
+
+AC_ARG_ENABLE([push_notifications], AS_HELP_STRING([--enable-push-notifications], [Enable push notifications support]), push_notifications=yes, push_notifications=no)
+AM_CONDITIONAL(ENABLE_PUSH_NOTIFICATIONS, [test "x$push_notifications" = xyes])
+
+dnl Server identity support only takes effect together with the proxy
+dnl server itself. (The duplicated AC_ARG_ENABLE for this option and its
+dnl copy-pasted help text were removed.)
+AC_ARG_ENABLE([proxy_server_identity], AS_HELP_STRING([--enable-proxy-server-identity],
+	      [Enable proxy server identity]), proxy_server_identity=yes, proxy_server_identity=no)
+AM_CONDITIONAL(ENABLE_PROXY_SERVER_IDENTITY, [test "x$proxy_server_identity" = xyes && test "x$proxy_server" = xyes])
+
+AC_ARG_ENABLE([proxy_client], AS_HELP_STRING([--enable-proxy-client], [Enable proxy client ability]), proxy_client=yes, proxy_client=no)
+AM_CONDITIONAL(ENABLE_PROXY_CLIENT, [test "x$proxy_client" = xyes])
+
+AC_ARG_ENABLE([tests], AS_HELP_STRING([--enable-tests], [Enable tests]), build_tests=yes, build_tests=no)
+AM_CONDITIONAL(ENABLE_TESTS, [test "x$build_tests" = xyes])
+AM_COND_IF([ENABLE_TESTS], [
+	PKG_CHECK_MODULES([CppUnit], [cppunit >= 1.12])
+])
+
+dnl The previous condition used "|" — a shell PIPE — so the first test's
+dnl result was silently discarded; "||" is the intended logical OR.
+AM_CONDITIONAL(PROXY_CLIENT_OR_SERVER, [test "x$proxy_client" = xyes || test "x$proxy_server" = xyes])
+
+PKG_CHECK_MODULES([Nettle], [nettle >= 2.4])
+PKG_CHECK_MODULES([GnuTLS], [gnutls >= 3.3])
+PKG_CHECK_MODULES([MsgPack], [msgpack >= 1.2])
+
+AC_ARG_WITH([jsoncpp], AS_HELP_STRING([--without-jsoncpp], [Build without JsonCpp support]))
+AS_IF([test "x$with_jsoncpp" != "xno"],
+ [PKG_CHECK_MODULES([JsonCpp], [jsoncpp >= 1.7.2], [have_jsoncpp=yes], [have_jsoncpp=no])],
+ [have_jsoncpp=no])
+AS_IF([test "x$have_jsoncpp" = "xyes"], [
+ AC_MSG_NOTICE([Using JsonCpp])
+ CPPFLAGS+=" -DOPENDHT_JSONCPP=1"
+], [
+ AC_MSG_NOTICE([Not using JsonCpp])
+ AM_COND_IF(PROXY_CLIENT_OR_SERVER, AC_MSG_ERROR(["JsonCpp is required for proxy/push notification support"]))
+])
+
+AM_COND_IF([PROXY_CLIENT_OR_SERVER], [
+ AC_CHECK_LIB(restbed, exit,, AC_MSG_ERROR([Missing restbed files]))
+ LDFLAGS="${LDFLAGS} -lrestbed"
+])
+
+CXXFLAGS="${CXXFLAGS} -DMSGPACK_DISABLE_LEGACY_NIL -DMSGPACK_DISABLE_LEGACY_CONVERT"
+
+dnl Check for Argon2
+dnl Resolution order: prefer the system libargon2 found via pkg-config;
+dnl otherwise fall back to the bundled copy (the ./argon2 git submodule),
+dnl unless the user explicitly requested the system one with --with-argon2,
+dnl in which case a missing library is a hard error.
+AC_ARG_WITH([argon2], AS_HELP_STRING([--without-argon2], [Use included Argon2]))
+AS_IF([test "x$with_argon2" != "xno"],
+ [PKG_CHECK_MODULES([Argon2], [libargon2], [have_argon2=yes], [have_argon2=no])],
+ [have_argon2=no])
+AS_IF([test "x$have_argon2" = "xyes"], [
+ AC_MSG_NOTICE([Using system Argon2])
+ dnl argon2_lib is substituted into opendht.pc's Requires line.
+ AC_SUBST(argon2_lib, [", libargon2"])
+], [
+ AS_IF([test "x$with_argon2" = "xyes"], [
+ AC_MSG_ERROR([Argon2 requested but not found])
+ ],[
+ AC_MSG_NOTICE([Using included Argon2])
+ dnl Point the compiler/linker at the in-tree submodule build products.
+ AC_SUBST(Argon2_CFLAGS, "-I\${top_srcdir}/argon2/src -I\${top_srcdir}/argon2/include")
+ AC_SUBST(Argon2_LIBS, "libargon2.la")
+ AC_SUBST(Argon2_LDFLAGS, "-L\${abs_top_srcdir}/argon2/src/.libs")
+ ])
+])
+
+dnl Drives the conditional build of the bundled submodule in the Makefiles.
+AM_CONDITIONAL([WITH_INCLUDED_ARGON2], [test "x$have_argon2" = "xno"])
+
+dnl Command-line tools (dhtnode etc.), enabled by default.
+dnl Bug fix: the original left the action-if-given empty, so an explicit
+dnl --enable-tools left $build_tools unset and silently DISABLED the tools;
+dnl using $enableval makes both --enable-tools and --disable-tools work.
+AC_ARG_ENABLE([tools], AS_HELP_STRING([--disable-tools],[Disable tools (CLI DHT node)]), build_tools=$enableval, build_tools=yes)
+AM_CONDITIONAL(ENABLE_TOOLS, [test "x$build_tools" = xyes])
+AM_COND_IF([ENABLE_TOOLS], [
+ AC_CHECK_HEADERS([readline/readline.h readline/history.h], [], [
+ AC_MSG_ERROR([unable to find readline.h])
+ ])
+])
+
+dnl Export the configure-time feature selection to the preprocessor.
+dnl Portability fix: 'CPPFLAGS+=' is a bash-only operator; configure runs
+dnl under a POSIX /bin/sh, so plain string concatenation is used instead.
+AM_COND_IF(ENABLE_PROXY_SERVER,
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PROXY_SERVER=true"],
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PROXY_SERVER=false"])
+
+AM_COND_IF(ENABLE_PROXY_CLIENT,
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PROXY_CLIENT=true"],
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PROXY_CLIENT=false"])
+
+AM_COND_IF(ENABLE_PUSH_NOTIFICATIONS,
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PUSH_NOTIFICATIONS=true"],
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PUSH_NOTIFICATIONS=false"])
+
+AM_COND_IF(ENABLE_PROXY_SERVER_IDENTITY,
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PROXY_SERVER_IDENTITY=true"],
+ [CPPFLAGS="$CPPFLAGS -DOPENDHT_PROXY_SERVER_IDENTITY=false"])
+
+dnl Only generate the Doxygen config and doc Makefile when doxygen was found
+dnl (the HAVE_DOXYGEN conditional is defined earlier in this script).
+AM_COND_IF([HAVE_DOXYGEN], [
+ AC_CONFIG_FILES([doc/Doxyfile doc/Makefile])
+])
+
+dnl Configure setup.py if we build the python module
+dnl The *_DIR substitutions mirror the CMake variables consumed by the shared
+dnl python/setup.py.in template, expressed relative to the autotools layout.
+AM_COND_IF([USE_CYTHON], [
+ AC_SUBST(CURRENT_SOURCE_DIR, ".")
+ AC_SUBST(CURRENT_BINARY_DIR, ".")
+ AC_SUBST(PROJECT_SOURCE_DIR, "..")
+ AC_SUBST(PROJECT_BINARY_DIR, "../src/.libs")
+ AC_CONFIG_FILES([python/Makefile python/setup.py])
+])
+
+dnl Unconditionally generated build files and the pkg-config descriptor.
+AC_CONFIG_FILES([Makefile
+ src/Makefile
+ tools/Makefile
+ tests/Makefile
+ opendht.pc])
+AC_OUTPUT
--- /dev/null
+# Install the dhtnode man page alongside the tools.
+# The DESTINATION is relative: CMake prefixes it with CMAKE_INSTALL_PREFIX
+# automatically, and a relative path keeps DESTDIR staging and CPack
+# relocation working (an absolute ${CMAKE_INSTALL_PREFIX}/... defeats that).
+if (OPENDHT_TOOLS)
+ install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/dhtnode.1 DESTINATION share/man/man1)
+endif ()
+
+# Build the Doxygen API documentation ('doc' target, part of ALL) and
+# install the generated HTML tree.
+if (OPENDHT_DOCUMENTATION)
+ if (NOT DOXYGEN_FOUND)
+ message(FATAL_ERROR "Doxygen is needed to build the documentation.")
+ endif()
+ # Be explicit about source/binary directories rather than relying on
+ # configure_file's implicit relative-path resolution.
+ configure_file (${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
+ ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
+ add_custom_target(doc ALL
+ COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMENT "Generating API documentation with Doxygen"
+ VERBATIM)
+ install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html DESTINATION share/doc/opendht)
+endif()
--- /dev/null
+# Doxyfile 1.8.9.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for this project.
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+DOXYFILE_ENCODING = UTF-8
+
+PROJECT_NAME = @PACKAGE_NAME@
+PROJECT_NUMBER = @PACKAGE_VERSION@
+PROJECT_BRIEF = "C++ Distributed Hash Table"
+PROJECT_LOGO =
+
+OUTPUT_DIRECTORY =
+CREATE_SUBDIRS = NO
+ALLOW_UNICODE_NAMES = NO
+OUTPUT_LANGUAGE = English
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+ABBREVIATE_BRIEF =
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = YES
+STRIP_FROM_PATH =
+STRIP_FROM_INC_PATH =
+SHORT_NAMES = NO
+JAVADOC_AUTOBRIEF = NO
+QT_AUTOBRIEF = NO
+MULTILINE_CPP_IS_BRIEF = NO
+INHERIT_DOCS = YES
+SEPARATE_MEMBER_PAGES = NO
+TAB_SIZE = 4
+ALIASES =
+TCL_SUBST =
+OPTIMIZE_OUTPUT_FOR_C = NO
+OPTIMIZE_OUTPUT_JAVA = NO
+OPTIMIZE_FOR_FORTRAN = NO
+OPTIMIZE_OUTPUT_VHDL = NO
+EXTENSION_MAPPING =
+MARKDOWN_SUPPORT = YES
+AUTOLINK_SUPPORT = YES
+BUILTIN_STL_SUPPORT = YES
+CPP_CLI_SUPPORT = NO
+SIP_SUPPORT = NO
+IDL_PROPERTY_SUPPORT = YES
+DISTRIBUTE_GROUP_DOC = NO
+SUBGROUPING = YES
+INLINE_GROUPED_CLASSES = NO
+INLINE_SIMPLE_STRUCTS = NO
+TYPEDEF_HIDES_STRUCT = NO
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+EXTRACT_ALL = NO
+EXTRACT_PRIVATE = NO
+EXTRACT_PACKAGE = NO
+EXTRACT_STATIC = YES
+EXTRACT_LOCAL_CLASSES = YES
+EXTRACT_LOCAL_METHODS = NO
+EXTRACT_ANON_NSPACES = NO
+HIDE_UNDOC_MEMBERS = NO
+HIDE_UNDOC_CLASSES = NO
+HIDE_FRIEND_COMPOUNDS = NO
+HIDE_IN_BODY_DOCS = NO
+INTERNAL_DOCS = NO
+CASE_SENSE_NAMES = YES
+HIDE_SCOPE_NAMES = NO
+HIDE_COMPOUND_REFERENCE= NO
+SHOW_INCLUDE_FILES = YES
+SHOW_GROUPED_MEMB_INC = NO
+FORCE_LOCAL_INCLUDES = NO
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+SORT_BRIEF_DOCS = NO
+SORT_MEMBERS_CTORS_1ST = NO
+SORT_GROUP_NAMES = NO
+SORT_BY_SCOPE_NAME = NO
+STRICT_PROTO_MATCHING = NO
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+SHOW_USED_FILES = YES
+SHOW_FILES = YES
+SHOW_NAMESPACES = YES
+FILE_VERSION_FILTER =
+LAYOUT_FILE =
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+QUIET = YES
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = NO
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = NO
+WARN_FORMAT = "$file:$line: $text"
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+INPUT = @top_srcdir@/include
+INPUT_ENCODING = UTF-8
+FILE_PATTERNS = *.cpp *.h
+RECURSIVE = YES
+
+EXCLUDE = @top_srcdir@/include/opendht/serialize.h
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXCLUDE_SYMBOLS =
+
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS =
+EXAMPLE_RECURSIVE = NO
+
+IMAGE_PATH =
+INPUT_FILTER =
+FILTER_PATTERNS =
+FILTER_SOURCE_FILES = NO
+FILTER_SOURCE_PATTERNS =
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = YES
+INLINE_SOURCES = NO
+STRIP_CODE_COMMENTS = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION = NO
+REFERENCES_LINK_SOURCE = YES
+SOURCE_TOOLTIPS = YES
+USE_HTAGS = NO
+VERBATIM_HEADERS = YES
+CLANG_ASSISTED_PARSING = NO
+CLANG_OPTIONS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+ALPHABETICAL_INDEX = NO
+COLS_IN_ALPHA_INDEX = 5
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = com.savoirfairelinux
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Savoir-faire Linux
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+RTF_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+GENERATE_MAN = NO
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_SUBDIR =
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+GENERATE_XML = NO
+XML_OUTPUT = xml
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+GENERATE_DOCBOOK = NO
+DOCBOOK_OUTPUT = docbook
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+GENERATE_PERLMOD = NO
+PERLMOD_LATEX = NO
+PERLMOD_PRETTY = YES
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+ENABLE_PREPROCESSING = YES
+MACRO_EXPANSION = NO
+EXPAND_ONLY_PREDEF = NO
+SEARCH_INCLUDES = YES
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS =
+PREDEFINED =
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = YES
+EXTERNAL_PAGES = YES
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+CLASS_DIAGRAMS = YES
+MSCGEN_PATH =
+DIA_PATH =
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = YES
+DOT_NUM_THREADS = 0
+DOT_FONTNAME = Helvetica
+DOT_FONTSIZE = 10
+DOT_FONTPATH =
+CLASS_GRAPH = YES
+COLLABORATION_GRAPH = YES
+GROUP_GRAPHS = YES
+UML_LOOK = NO
+UML_LIMIT_NUM_FIELDS = 10
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = YES
+INCLUDED_BY_GRAPH = YES
+CALL_GRAPH = NO
+CALLER_GRAPH = NO
+GRAPHICAL_HIERARCHY = YES
+DIRECTORY_GRAPH = YES
+DOT_IMAGE_FORMAT = png
+INTERACTIVE_SVG = NO
+DOT_PATH =
+DOTFILE_DIRS =
+MSCFILE_DIRS =
+DIAFILE_DIRS =
+PLANTUML_JAR_PATH =
+PLANTUML_INCLUDE_PATH =
+DOT_GRAPH_MAX_NODES = 50
+MAX_DOT_GRAPH_DEPTH = 0
+DOT_TRANSPARENT = NO
+DOT_MULTI_TARGETS = NO
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
--- /dev/null
+# Install the dhtnode man page.
+dist_man1_MANS = dhtnode.1
+
+# Build the Doxygen API documentation when doxygen was found at configure time.
+if HAVE_DOXYGEN
+# Stamp file so doxygen is not re-run on every `make` invocation.
+doxyfile.stamp:
+	$(DOXYGEN) Doxyfile
+	echo stamp > doxyfile.stamp
+
+CLEANFILES = doxyfile.stamp
+
+all-local: doxyfile.stamp
+# NOTE(review): cleans under $(top_srcdir) — generated docs apparently land in
+# the source tree rather than the build tree; confirm Doxyfile's OUTPUT_DIRECTORY.
+clean-local:
+	rm -rf $(top_srcdir)/doc/man $(top_srcdir)/doc/html
+endif
--- /dev/null
+\documentclass[11pt]{article}
+
+\usepackage[utf8x]{inputenc}
+\usepackage[top=2cm,bottom=2cm]{geometry}
+
+\usepackage{forloop}
+\newcounter{counter}
+
+% maths
+\usepackage{amssymb}
+\usepackage{amsmath}
+\usepackage{mathrsfs}
+\usepackage{shadethm}
+\usepackage{amsthm}
+\newshadetheorem{shadeDef}{Definition}[section]
+\newtheorem{remark}{Remark}[section]
+\renewcommand{\P}{\mathbb{P}}
+
+\usepackage{float}
+\usepackage{booktabs}
+
+\setlength{\parindent}{0ex}
+\setlength{\parskip}{0.5em}
+
+\begin{document}
+ \title{Annex 1: Probabilistic analysis of connectivity changes}
+ \author{Adrien Béraud, Simon Désaulniers, Guillaume Roguez}
+ \maketitle
+ \pagestyle{empty}
+ \begin{shadeDef}
+ A node flagged as \emph{``expired''} by a node $n$ is a node which has not responded to any
+ of $n$'s last three requests.
+ \end{shadeDef}
+ \begin{remark}
+ An expired node will not be contacted before 10 minutes from its expiration time.
+ \end{remark}
+
+    Let $N$ be the DHT network, $n_0\in N$ a given node, and consider the following probabilistic events:
+ \begin{itemize}
+ \item $A$: $\forall n \in N$ $n$ is unreachable by $n_0$, \emph{i.e.} $n_0$ lost connection
+ with $N$;
+ \item $B$: $S\subset N$, the nodes unreachable by $n_0$ with $k={|S|\over|N|}$;
+ \item $C$: $m \le |N|$ nodes are flagged as ``expired''.
+ \end{itemize}
+
+    We are interested in knowing $\P(A|C)$, \emph{i.e.} the probability that $A$ has occurred
+    given that $C$ is observed. From the above, we immediately get
+ $$\left\{
+ \begin{array}{ll}
+ \P(C|A) & = 1\\
+ \P(A) + \P(B) & = 1
+ \end{array}
+ \right.$$
+ Also, the event $A|C$ can be abstracted as the urn problem of draw without replacement. Then,
+ $$\P(C|B) = \prod_{i=0}^m \left[k|N| - i \over |N|\right] = \prod_{i=0}^m \left[k - { i \over |N| }\right]$$
+    Furthermore, using Bayes' theorem we have
+ \begin{align*}
+ \P(A|C) & = { \P(C|A)\P(A) \over \P(C|A)\P(A) + \P(C|B)\P(B)}\\
+ & = { \P(A) \over \P(A) + \P(C|B)\P(B) }\\
+ & = { \P(A) \over \P(A) + \P(C|B)\left[1 - \P(A)\right] }\\
+ \Rightarrow \forloop{counter}{0}{\value{counter} < 5}{\qquad}\quad \P(A) & =
+ \P(A|C)\left[\P(A) + \P(C|B)\left(1 - \P(A)\right) \right] \\
+ \Rightarrow \forloop{counter}{0}{\value{counter} < 2}{\qquad}\;\:\, \P(A)\left[{ 1 \over \P(A|C)} - 1\right] & =
+ \P(C|B)\left(1 - \P(A)\right) \\
+ \end{align*}
+ Finally,
+ \begin{equation}
+ \label{eq:final}
+ \left[{\P(A) \over 1 - \P(A)}\right]\left[{ 1 \over \P(A|C)} - 1\right] =
+ \prod_{i=0}^m \left[k - { i \over |N| }\right]
+ \end{equation}
+
+ From \eqref{eq:final}, we may set a plausible configuration $\{\P(A),\P(A|C),k,|N|\}$ letting us
+ produce results such as in table \ref{tbl:k_1_2}, \ref{tbl:k_2_3} and \ref{tbl:k_3_4}.
+
+ \begin{table}[H]
+ \centering
+ \caption{The values for $m$ assuming $\P(A|C) \ge 0.95,\, k = {1 \over 2}$}
+ \label{tbl:k_1_2}
+ \begin{tabular}{lcccc}
+ \toprule
+ $|N| \diagdown \P(A)$ & ${1 \over 10 }$ & ${1 \over 100 }$ & ${1 \over 1000 }$ & ${1 \over 10000 }$\\
+ \midrule
+ $2^0$ & 1 & 1 & 1 & 1\\
+ $2^1$ & 1 & 1 & 1 & 1\\
+ $2^2$ & 2 & 2 & 2 & 2\\
+ $2^3$ & 4 & 4 & 4 & 4\\
+ $2^4$ & 5 & 6 & 7 & 8\\
+ $2^5$ & 5 & 7 & 9 & 10\\
+ $2^6$ & 6 & 9 & 11 & 13\\
+ $2^7$ & 6 & 9 & 12 & 14\\
+ $2^8$ & 7 & 10 & 13 & 16\\
+ $2^9$ & 7 & 10 & 13 & 16\\
+ $2^{10}$ & 7 & 10 & 13 & 17\\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ \begin{table}[H]
+ \centering
+ \caption{The values for $m$ assuming $\P(A|C) \ge 0.95,\, k = {2 \over 3}$}
+ \label{tbl:k_2_3}
+ \begin{tabular}{lcccc}
+ \toprule
+ $|N| \diagdown \P(A)$ & ${1 \over 10 }$ & ${1 \over 100 }$ & ${1 \over 1000 }$ & ${1 \over 10000 }$\\
+ \midrule
+ $2^0$ & 1 & 1 & 1 & 1\\
+ $2^1$ & 2 & 2 & 2 & 2\\
+ $2^2$ & 3 & 3 & 4 & 4\\
+ $2^3$ & 5 & 5 & 6 & 8\\
+ $2^4$ & 6 & 8 & 9 & 10\\
+ $2^5$ & 8 & 10 & 12 & 14\\
+ $2^6$ & 9 & 13 & 16 & 18\\
+ $2^7$ & 11 & 15 & 18 & 22\\
+ $2^8$ & 11 & 16 & 21 & 25\\
+ $2^9$ & 12 & 17 & 22 & 27\\
+ $2^{10}$ & 12 & 18 & 23 & 28\\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ \begin{table}[H]
+ \centering
+ \caption{The values for $m$ assuming $\P(A|C) \ge 0.95,\, k = {3 \over 4}$}
+ \label{tbl:k_3_4}
+ \begin{tabular}{lcccc}
+ \toprule
+ $|N| \diagdown \P(A)$ & ${1 \over 10 }$ & ${1 \over 100 }$ & ${1 \over 1000 }$ & ${1 \over 10000 }$\\
+ \midrule
+ $2^0$ & 1 & 1 & 1 & 1\\
+ $2^1$ & 2 & 2 & 2 & 2\\
+ $2^2$ & 3 & 3 & 3 & 3\\
+ $2^3$ & 5 & 6 & 6 & 6\\
+ $2^4$ & 7 & 9 & 10 & 11\\
+ $2^5$ & 10 & 12 & 14 & 16\\
+ $2^6$ & 12 & 16 & 19 & 22\\
+ $2^7$ & 14 & 19 & 23 & 27\\
+ $2^8$ & 15 & 21 & 27 & 32\\
+ $2^9$ & 16 & 23 & 30 & 36\\
+ $2^{10}$ & 17 & 24 & 31 & 38\\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+\end{document}
--- /dev/null
+.TH DHTNODE 1 2016-07-29
+
+.SH NAME
+.B dhtnode
+- a simple OpenDHT command line node runner.
+
+.SH SYNOPSIS
+.B dhtnode [-h]
+
+.B dhtnode [-v [-l \fIlogfile\fP]] [-i] [-d] [-n \fInetwork_id\fP] [-p \fIlocal_port\fP] [-b \fIbootstrap_host\fP[:\fIport\fP]]
+
+.SH DESCRIPTION
+
+This program runs a simple OpenDHT node in an interactive way. If you rather
+want to run the node in daemon mode, option \fB'-d'\fP is provided. When running
+in the interactive shell, you benefit from the readline capabilities for writing
+your commands such as command history. Here are the available commands in the
+interactive shell:
+
+.EE
+ h, help Print this help message.
+ q, quit Quit the program.
+ log Start/stop printing DHT logs.
+
+ Node information:
+ ll Print basic information and stats about the current node.
+ ls Print basic information about current searches.
+  ld              Print basic information about currently stored values on this node.
+ lr Print the full current routing table of this node
+
+ Operations on the DHT:
+ b ip:port Ping potential node at given IP address/port.
+ g [key] Get values at [key].
+ l [key] Listen for value changes at [key].
+ p [key] [str] Put string value at [key].
+ s [key] [str] Put string value at [key], signed with our generated
+ private key.
+ e [key] [dest] [str] Put string value at [key], encrypted for [dest] with
+ its public key (if found).
+
+.SH OPTIONS
+
+.TP
+\fB-h\fP
+Prints some help.
+
+.TP
+\fB-v\fP
+Enable the verbose mode (log to stdout by default)
+
+.TP
+\fB-l\fP \fIlog_file\fP
+Write log to file instead of stdout
+
+.TP
+\fB-i\fP
+Generate cryptographic identity for the node.
+
+.TP
+\fB-d\fP
+Run the program in daemon mode (will fork in the background).
+
+.TP
+\fB-n\fP \fInetwork_id\fP
+Specify the network id. This lets you connect to distinct networks and prevents
+the merge of two different networks (available since OpenDHT v0.6.1).
+
+.TP
+\fB-p\fP \fIlocal_port\fP
+Use port \fIlocal_port\fP for the program to bind to.
+
+.TP
+\fB-b\fP \fIbootstrap_host\fP[:\fIport\fP]
+The program needs to be given a node to connect to the network. You use this
+option to provide the ip address of that node.
+
+.SH AUTHORS
+.TP
+Program written by
+.IP \(bu
+Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+.TP
+Man page written by
+.IP \(bu
+Simon Désaulniers <sim.desaulniers@gmail.com>
--- /dev/null
+# Build and install OpenDHT (with Python bindings and LTO) on top of the
+# prebuilt dependency image, then remove the sources to keep the image small.
+FROM aberaud/opendht-deps
+MAINTAINER Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+RUN git clone https://github.com/savoirfairelinux/opendht.git \
+    && cd opendht && mkdir build && cd build \
+    && cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DOPENDHT_PYTHON=On -DOPENDHT_LTO=On && make -j8 && make install \
+    && cd ../.. && rm -rf opendht
--- /dev/null
+# Dependency image for OpenDHT builds (GCC toolchain on Ubuntu 16.04).
+FROM ubuntu:16.04
+MAINTAINER Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+RUN apt-get update && apt-get install -y build-essential cmake git wget libncurses5-dev libreadline-dev nettle-dev libgnutls28-dev libuv1-dev cython3 python3-dev libcppunit-dev libjsoncpp-dev libasio-dev libssl-dev python3-setuptools && apt-get clean
+
+# build restbed from sources
+RUN git clone --recursive https://github.com/corvusoft/restbed.git \
+    && cd restbed && mkdir build && cd build \
+    && cmake -DBUILD_TESTS=NO -DBUILD_EXAMPLES=NO -DBUILD_SSL=NO -DBUILD_SHARED=YES -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib .. \
+    && make -j8 install \
+    && cd .. && rm -rf restbed
+
+# build msgpack from source (pinned to cpp-2.1.5)
+RUN wget https://github.com/msgpack/msgpack-c/releases/download/cpp-2.1.5/msgpack-2.1.5.tar.gz \
+    && tar -xzf msgpack-2.1.5.tar.gz \
+    && cd msgpack-2.1.5 && mkdir build && cd build \
+    && cmake -DMSGPACK_CXX11=ON -DMSGPACK_BUILD_EXAMPLES=OFF -DCMAKE_INSTALL_PREFIX=/usr .. \
+    && make -j8 && make install \
+    && cd ../.. && rm -rf msgpack-2.1.5 msgpack-2.1.5.tar.gz
--- /dev/null
+# Dependency image for OpenDHT builds using the LLVM/Clang toolchain.
+# GCC is removed so builds cannot silently fall back to it.
+FROM ubuntu:16.04
+MAINTAINER Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+RUN apt-get update \
+    && apt-get install -y llvm llvm-dev clang make cmake git wget libncurses5-dev libreadline-dev nettle-dev libgnutls28-dev libuv1-dev libmsgpack-dev libjsoncpp-dev libasio-dev cython3 python3-dev python3-setuptools libcppunit-dev \
+    && apt-get remove -y gcc g++ && apt-get autoremove -y && apt-get clean
+
+# Point the generic compiler entry points at clang (cc/c++ symlinks).
+ENV CC cc
+ENV CXX c++
+
+# build restbed from sources
+RUN git clone --recursive https://github.com/corvusoft/restbed.git \
+    && cd restbed && mkdir build && cd build \
+    && cmake -DBUILD_TESTS=NO -DBUILD_EXAMPLES=NO -DBUILD_SSL=NO -DBUILD_SHARED=YES -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib .. \
+    && make -j8 install \
+    && cd .. && rm -rf restbed
+
+# build msgpack from source (pinned to cpp-2.1.5)
+RUN wget https://github.com/msgpack/msgpack-c/releases/download/cpp-2.1.5/msgpack-2.1.5.tar.gz \
+    && tar -xzf msgpack-2.1.5.tar.gz \
+    && cd msgpack-2.1.5 && mkdir build && cd build \
+    && cmake -DMSGPACK_CXX11=ON -DMSGPACK_BUILD_EXAMPLES=OFF -DCMAKE_INSTALL_PREFIX=/usr .. \
+    && make -j8 && make install \
+    && cd ../.. && rm -rf msgpack-2.1.5 msgpack-2.1.5.tar.gz
--- /dev/null
+# CI image: build OpenDHT from the current working tree, run the unit tests,
+# and install. Used by the "opendht.classic" Travis job.
+FROM aberaud/opendht-deps
+MAINTAINER Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+
+COPY . /root/opendht
+RUN cd /root/opendht && mkdir build && cd build \
+    && cmake -DCMAKE_INSTALL_PREFIX=/usr -DOPENDHT_PYTHON=On -DOPENDHT_LTO=On -DOPENDHT_TESTS=ON .. \
+    && make -j8 && ./opendht_unit_tests && make install
--- /dev/null
+# CI image: build OpenDHT from the working tree with the Clang/LLVM toolchain,
+# run the unit tests, and install. Note: no LTO here, unlike the GCC job.
+FROM aberaud/opendht-deps-llvm
+MAINTAINER Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+
+COPY . /root/opendht
+RUN cd /root/opendht && mkdir build && cd build \
+    && cmake -DCMAKE_INSTALL_PREFIX=/usr -DOPENDHT_PYTHON=On -DOPENDHT_TESTS=ON .. \
+    && make -j8 && ./opendht_unit_tests && make install
--- /dev/null
+# Base image for the proxy/push CI jobs: only copies the working tree.
+# NOTE(review): no build step here — presumably the CI script configures and
+# builds with job-specific cmake options (proxy server/client, push); confirm.
+FROM aberaud/opendht-deps
+MAINTAINER Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+
+COPY . /root/opendht
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+// Umbrella header: pulls in the main public OpenDHT APIs.
+#include "opendht/dhtrunner.h"
+// NOTE(review): this guard is a value test (#if) while OPENDHT_INDEXATION below
+// uses #ifdef — presumably OPENDHT_PROXY_SERVER is always defined to 0 or 1 by
+// the build system; confirm the mixed conventions are intentional.
+#if OPENDHT_PROXY_SERVER
+#include "opendht/dht_proxy_server.h"
+#endif
+#include "opendht/log.h"
+#include "opendht/default_types.h"
+#ifdef OPENDHT_INDEXATION
+#include "opendht/indexation/pht.h"
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Authors: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h"
+#include "value.h"
+
+#include <vector>
+#include <memory>
+#include <functional>
+
+#ifdef OPENDHT_JSONCPP
+#include <json/json.h>
+#endif
+
+namespace dht {
+
+struct Node;
+
+/**
+ * Current status of a DHT node.
+ */
+enum class NodeStatus {
+ Disconnected, // 0 nodes
+ Connecting, // 1+ nodes
+ Connected // 1+ good nodes
+};
+
+/**
+ * Routing-table statistics for one address family (IPv4 or IPv6).
+ */
+struct OPENDHT_PUBLIC NodeStats {
+    unsigned good_nodes {0},      // responsive, confirmed nodes
+             dubious_nodes {0},   // nodes that may have become unreachable
+             cached_nodes {0},
+             incoming_nodes {0};
+    unsigned table_depth {0};     // depth of the routing table
+    /** Total number of known nodes (good + dubious). */
+    unsigned getKnownNodes() const { return good_nodes + dubious_nodes; }
+    /** Rough network-size estimate derived from the routing table depth. */
+    unsigned long getNetworkSizeEstimation() const { return 8 * std::exp2(table_depth); }
+    std::string toString() const;
+
+#ifdef OPENDHT_JSONCPP
+    /**
+     * Build a json object from a NodeStats
+     */
+    Json::Value toJson() const;
+    NodeStats() {}
+    explicit NodeStats(const Json::Value& v);
+#endif
+
+    MSGPACK_DEFINE_MAP(good_nodes, dubious_nodes, cached_nodes, incoming_nodes, table_depth)
+};
+
+/**
+ * Identity and per-family statistics of a DHT node.
+ */
+struct OPENDHT_PUBLIC NodeInfo {
+    InfoHash id;        // public key fingerprint
+    InfoHash node_id;   // DHT node ID
+    NodeStats ipv4;     // IPv4 routing table stats
+    NodeStats ipv6;     // IPv6 routing table stats
+
+#ifdef OPENDHT_JSONCPP
+    /**
+     * Build a json object from a NodeInfo
+     */
+    Json::Value toJson() const;
+    NodeInfo() {}
+    explicit NodeInfo(const Json::Value& v);
+#endif
+
+    MSGPACK_DEFINE_MAP(id, node_id, ipv4, ipv6)
+};
+
+/**
+ * Dht configuration.
+ */
+struct OPENDHT_PUBLIC Config {
+ /** DHT node ID */
+ InfoHash node_id;
+
+ /**
+ * DHT network ID. A node will only talk with other nodes having
+ * the same network ID.
+ * Network ID 0 (default) represents the main public network.
+ */
+ NetId network;
+
+ /** For testing purposes only, enables bootstrap mode */
+ bool is_bootstrap;
+
+ /** Makes the DHT responsible to maintain its stored values. Consumes more ressources. */
+ bool maintain_storage;
+};
+
+/**
+ * SecureDht configuration.
+ */
+struct OPENDHT_PUBLIC SecureDhtConfig
+{
+ Config node_config;
+ crypto::Identity id;
+};
+
+/** Default local storage limit, in bytes (64 MiB). */
+static constexpr size_t DEFAULT_STORAGE_LIMIT {1024 * 1024 * 64};
+
+/** Exported storage entry: an infohash and its packed values. */
+using ValuesExport = std::pair<InfoHash, Blob>;
+
+// Result callbacks. NOTE(review): the bool return presumably means
+// "continue receiving results" — confirm against the Dht implementation.
+using QueryCallback = std::function<bool(const std::vector<std::shared_ptr<FieldValueIndex>>& fields)>;
+using GetCallback = std::function<bool(const std::vector<std::shared_ptr<Value>>& values)>;
+using ValueCallback = std::function<bool(const std::vector<std::shared_ptr<Value>>& values, bool expired)>;
+using GetCallbackSimple = std::function<bool(std::shared_ptr<Value> value)>;
+using ShutdownCallback = std::function<void()>;
+
+/** Lookup used by the certificate store to resolve certificates from a public key id. */
+using CertificateStoreQuery = std::function<std::vector<std::shared_ptr<crypto::Certificate>>(const InfoHash& pk_id)>;
+
+// C-style callback variant (for bindings); user_data is passed through unchanged.
+typedef bool (*GetCallbackRaw)(std::shared_ptr<Value>, void *user_data);
+
+// Adapters wrapping raw or simplified callbacks into the std::function forms above.
+OPENDHT_PUBLIC GetCallbackSimple bindGetCb(GetCallbackRaw raw_cb, void* user_data);
+OPENDHT_PUBLIC GetCallback bindGetCb(GetCallbackSimple cb);
+
+// Completion callbacks: invoked once when an operation finishes.
+using DoneCallback = std::function<void(bool success, const std::vector<std::shared_ptr<Node>>& nodes)>;
+typedef void (*DoneCallbackRaw)(bool, std::vector<std::shared_ptr<Node>>*, void *user_data);
+typedef void (*ShutdownCallbackRaw)(void *user_data);
+typedef void (*DoneCallbackSimpleRaw)(bool, void *user_data);
+typedef bool (*FilterRaw)(const Value&, void *user_data);
+
+using DoneCallbackSimple = std::function<void(bool success)>;
+
+OPENDHT_PUBLIC ShutdownCallback bindShutdownCb(ShutdownCallbackRaw shutdown_cb_raw, void* user_data);
+OPENDHT_PUBLIC DoneCallback bindDoneCb(DoneCallbackSimple donecb);
+OPENDHT_PUBLIC DoneCallback bindDoneCb(DoneCallbackRaw raw_cb, void* user_data);
+OPENDHT_PUBLIC DoneCallbackSimple bindDoneCbSimple(DoneCallbackSimpleRaw raw_cb, void* user_data);
+OPENDHT_PUBLIC Value::Filter bindFilterRaw(FilterRaw raw_filter, void* user_data);
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h"
+#include "utils.h"
+#include "rng.h"
+
+extern "C" {
+#include <gnutls/gnutls.h>
+#include <gnutls/abstract.h>
+#include <gnutls/x509.h>
+}
+
+#include <vector>
+#include <memory>
+
+#ifdef _WIN32
+#include <iso646.h>
+#endif
+
+namespace dht {
+
+/**
+ * Contains all crypto primitives
+ */
+namespace crypto {
+
+/**
+ * Base exception type for all cryptographic errors.
+ */
+class OPENDHT_PUBLIC CryptoException : public std::runtime_error {
+    public:
+        CryptoException(const std::string& str) : std::runtime_error(str) {}
+};
+
+/**
+ * Exception thrown when a decryption error happened.
+ */
+class OPENDHT_PUBLIC DecryptError : public CryptoException {
+ public:
+ DecryptError(const std::string& str = "") : CryptoException(str) {};
+};
+
+struct PrivateKey;
+struct Certificate;
+class RevocationList;
+
+using Identity = std::pair<std::shared_ptr<PrivateKey>, std::shared_ptr<Certificate>>;
+
+/**
+ * A public key.
+ */
+struct OPENDHT_PUBLIC PublicKey
+{
+ PublicKey() {}
+
+ /**
+ * Takes ownership of an existing gnutls_pubkey.
+ */
+ PublicKey(gnutls_pubkey_t k) : pk(k) {}
+ PublicKey(const Blob& pk);
+ PublicKey(PublicKey&& o) noexcept : pk(o.pk) { o.pk = nullptr; };
+
+ ~PublicKey();
+ explicit operator bool() const { return pk; }
+ bool operator ==(const PublicKey& o) const {
+ return pk == o.pk || getId() == o.getId();
+ }
+ bool operator !=(const PublicKey& o) const {
+ return !(*this == o);
+ }
+
+ PublicKey& operator=(PublicKey&& o) noexcept;
+
+ /**
+ * Get public key fingerprint
+ */
+ InfoHash getId() const;
+
+ /**
+ * Get public key long fingerprint
+ */
+ PkId getLongId() const;
+
+ bool checkSignature(const Blob& data, const Blob& signature) const;
+ Blob encrypt(const Blob&) const;
+
+ void pack(Blob& b) const;
+ void unpack(const uint8_t* dat, size_t dat_size);
+
+ std::string toString() const;
+
+ template <typename Packer>
+ void msgpack_pack(Packer& p) const
+ {
+ Blob b;
+ pack(b);
+ p.pack_bin(b.size());
+ p.pack_bin_body((const char*)b.data(), b.size());
+ }
+
+ void msgpack_unpack(msgpack::object o);
+
+ gnutls_pubkey_t pk {};
+private:
+ PublicKey(const PublicKey&) = delete;
+ PublicKey& operator=(const PublicKey&) = delete;
+ void encryptBloc(const uint8_t* src, size_t src_size, uint8_t* dst, size_t dst_size) const;
+};
+
+/**
+ * A private key, including the corresponding public key.
+ */
+struct OPENDHT_PUBLIC PrivateKey
+{
+ PrivateKey();
+ //PrivateKey(gnutls_privkey_t k) : key(k) {}
+
+ /**
+ * Takes ownership of an existing gnutls_x509_privkey.
+ */
+ PrivateKey(gnutls_x509_privkey_t k);
+
+ PrivateKey(PrivateKey&& o) noexcept;
+ PrivateKey& operator=(PrivateKey&& o) noexcept;
+
+ PrivateKey(const Blob& import, const std::string& password = {});
+ ~PrivateKey();
+ explicit operator bool() const { return key; }
+
+ PublicKey getPublicKey() const;
+ Blob serialize(const std::string& password = {}) const;
+
+ /**
+ * Sign the provided binary object.
+ * @returns the signature data.
+ */
+ Blob sign(const Blob&) const;
+
+ /**
+ * Try to decrypt the provided cypher text.
+ * In case of failure a CryptoException is thrown.
+ * @returns the decrypted data.
+ */
+ Blob decrypt(const Blob& cypher) const;
+
+ /**
+ * Generate a new RSA key pair
+ * @param key_length : size of the modulus in bits
+ * Minimim value: 2048
+ * Recommended values: 4096, 8192
+ */
+ static PrivateKey generate(unsigned key_length = 4096);
+ static PrivateKey generateEC();
+
+ gnutls_privkey_t key {};
+ gnutls_x509_privkey_t x509_key {};
+private:
+ PrivateKey(const PrivateKey&) = delete;
+ PrivateKey& operator=(const PrivateKey&) = delete;
+ Blob decryptBloc(const uint8_t* src, size_t src_size) const;
+
+ //friend dht::crypto::Identity dht::crypto::generateIdentity(const std::string&, dht::crypto::Identity, unsigned key_length);
+};
+
+
+class OPENDHT_PUBLIC RevocationList
+{
+ using clock = std::chrono::system_clock;
+ using time_point = clock::time_point;
+ using duration = clock::duration;
+public:
+ RevocationList();
+ /** Import a serialized CRL. */
+ RevocationList(const Blob& b);
+ RevocationList(RevocationList&& o) noexcept : crl(o.crl) { o.crl = nullptr; }
+ ~RevocationList();
+
+ RevocationList& operator=(RevocationList&& o) noexcept {
+ // Swap the handles instead of overwriting: the CRL previously held by
+ // *this (if any) is released by o's destructor instead of being leaked.
+ // This is also safe on self-move.
+ gnutls_x509_crl_t tmp = crl;
+ crl = o.crl;
+ o.crl = tmp;
+ return *this;
+ }
+
+ /** Serialize the CRL, appending the bytes to b. */
+ void pack(Blob& b) const;
+ /** Deserialize the CRL from a raw byte buffer. */
+ void unpack(const uint8_t* dat, size_t dat_size);
+ Blob getPacked() const {
+ Blob b;
+ pack(b);
+ return b;
+ }
+
+ /** Msgpack support: packs the serialized CRL as one binary blob. */
+ template <typename Packer>
+ void msgpack_pack(Packer& p) const
+ {
+ Blob b = getPacked();
+ p.pack_bin(b.size());
+ p.pack_bin_body((const char*)b.data(), b.size());
+ }
+
+ void msgpack_unpack(msgpack::object o);
+
+ /** Add a certificate to this revocation list (revoked at time t). */
+ void revoke(const Certificate& crt, time_point t = time_point::min());
+
+ bool isRevoked(const Certificate& crt) const;
+
+ /**
+ * Sign this revocation list using provided key and certificate.
+ * Validity_period sets the duration until next update (default to no next update).
+ */
+ void sign(const PrivateKey&, const Certificate&, duration validity_period = {});
+ void sign(const Identity& id) { sign(*id.first, *id.second); }
+
+ bool isSignedBy(const Certificate& issuer) const;
+
+ std::string toString() const;
+
+ /**
+ * Read the CRL number extension field.
+ */
+ Blob getNumber() const;
+
+ /** Read CRL issuer Common Name (CN) */
+ std::string getIssuerName() const;
+
+ /** Read CRL issuer User ID (UID) */
+ std::string getIssuerUID() const;
+
+ time_point getUpdateTime() const;
+ time_point getNextUpdateTime() const;
+
+ /** Access the underlying gnutls handle (still owned by this object). */
+ gnutls_x509_crl_t get() { return crl; }
+ /** Return an independent copy of the CRL handle; caller takes ownership. */
+ gnutls_x509_crl_t getCopy() const {
+ if (not crl)
+ return nullptr;
+ auto copy = RevocationList(getPacked());
+ gnutls_x509_crl_t ret = copy.crl;
+ copy.crl = nullptr;
+ return ret;
+ }
+
+private:
+ gnutls_x509_crl_t crl {};
+ RevocationList(const RevocationList&) = delete;
+ RevocationList& operator=(const RevocationList&) = delete;
+};
+
+
+struct OPENDHT_PUBLIC Certificate {
+ Certificate() {}
+
+ /**
+ * Take ownership of existing gnutls structure
+ */
+ Certificate(gnutls_x509_crt_t crt) : cert(crt) {}
+
+ Certificate(Certificate&& o) noexcept : cert(o.cert), issuer(std::move(o.issuer)) { o.cert = nullptr; }
+
+ /**
+ * Import certificate (PEM or DER) or certificate chain (PEM),
+ * ordered from subject to issuer
+ */
+ Certificate(const Blob& crt);
+ Certificate(const std::string& pem) : cert(nullptr) {
+ unpack((const uint8_t*)pem.data(), pem.size());
+ }
+ Certificate(const uint8_t* dat, size_t dat_size) : cert(nullptr) {
+ unpack(dat, dat_size);
+ }
+
+ /**
+ * Import certificate chain (PEM or DER),
+ * ordered from subject to issuer
+ */
+ template<typename Iterator>
+ Certificate(const Iterator& begin, const Iterator& end) {
+ unpack(begin, end);
+ }
+
+ /**
+ * Import certificate chain (PEM or DER),
+ * ordered from subject to issuer
+ */
+ template<typename Iterator>
+ Certificate(const std::vector<std::pair<Iterator, Iterator>>& certs) {
+ unpack(certs);
+ }
+
+ Certificate& operator=(Certificate&& o) noexcept;
+ ~Certificate();
+
+ /** Serialize the certificate, appending the bytes to b. */
+ void pack(Blob& b) const;
+ /** Deserialize the certificate from a raw byte buffer. */
+ void unpack(const uint8_t* dat, size_t dat_size);
+ Blob getPacked() const {
+ Blob b;
+ pack(b);
+ return b;
+ }
+
+ /**
+ * Import certificate chain (PEM or DER).
+ * Certificates are not checked during import.
+ *
+ * Iterator is the type of an iterator or pointer to
+ * gnutls_x509_crt_t or Blob instances to import, that should be
+ * ordered from subject to issuer.
+ */
+ template<typename Iterator>
+ void unpack(const Iterator& begin, const Iterator& end)
+ {
+ std::shared_ptr<Certificate> tmp_subject {};
+ std::shared_ptr<Certificate> first {};
+ // != (instead of <) supports forward iterators, not only random-access ones.
+ for (Iterator icrt = begin; icrt != end; ++icrt) {
+ auto tmp_crt = std::make_shared<Certificate>(*icrt);
+ if (tmp_subject)
+ tmp_subject->issuer = tmp_crt;
+ tmp_subject = std::move(tmp_crt);
+ if (!first)
+ first = tmp_subject;
+ }
+ *this = first ? std::move(*first) : Certificate();
+ }
+
+ /**
+ * Import certificate chain (PEM or DER).
+ * Certificates are not checked during import.
+ *
+ * Iterator is the type of an iterator or pointer to the bytes of
+ * the certificates to import.
+ *
+ * @param certs list of (begin, end) iterator pairs, pointing to the
+ * PEM or DER certificate data to import, that should be
+ * ordered from subject to issuer.
+ */
+ template<typename Iterator>
+ void unpack(const std::vector<std::pair<Iterator, Iterator>>& certs)
+ {
+ std::shared_ptr<Certificate> tmp_issuer;
+ // reverse iteration
+ for (auto li = certs.rbegin(); li != certs.rend(); ++li) {
+ Certificate tmp_crt;
+ gnutls_x509_crt_init(&tmp_crt.cert);
+ const gnutls_datum_t crt_dt {(uint8_t*)&(*li->first), (unsigned)(li->second-li->first)};
+ int err = gnutls_x509_crt_import(tmp_crt.cert, &crt_dt, GNUTLS_X509_FMT_PEM);
+ if (err != GNUTLS_E_SUCCESS)
+ err = gnutls_x509_crt_import(tmp_crt.cert, &crt_dt, GNUTLS_X509_FMT_DER);
+ if (err != GNUTLS_E_SUCCESS)
+ throw CryptoException(std::string("Could not read certificate - ") + gnutls_strerror(err));
+ tmp_crt.issuer = tmp_issuer;
+ tmp_issuer = std::make_shared<Certificate>(std::move(tmp_crt));
+ }
+ *this = tmp_issuer ? std::move(*tmp_issuer) : Certificate();
+ }
+
+ /** Msgpack support: packs the serialized certificate as one binary blob. */
+ template <typename Packer>
+ void msgpack_pack(Packer& p) const
+ {
+ Blob b;
+ pack(b);
+ p.pack_bin(b.size());
+ p.pack_bin_body((const char*)b.data(), b.size());
+ }
+
+ void msgpack_unpack(msgpack::object o);
+
+ explicit operator bool() const { return cert; }
+ PublicKey getPublicKey() const;
+
+ /** Same as getPublicKey().getId() */
+ InfoHash getId() const;
+ /** Same as getPublicKey().getLongId() */
+ PkId getLongId() const;
+
+ /** Read certificate Common Name (CN) */
+ std::string getName() const;
+
+ /** Read certificate User ID (UID) */
+ std::string getUID() const;
+
+ /** Read certificate issuer Common Name (CN) */
+ std::string getIssuerName() const;
+
+ /** Read certificate issuer User ID (UID) */
+ std::string getIssuerUID() const;
+
+ enum class NameType { UNKNOWN = 0, RFC822, DNS, URI, IP };
+
+ /** Read certificate alternative names */
+ std::vector<std::pair<NameType, std::string>> getAltNames() const;
+
+ std::chrono::system_clock::time_point getActivation() const;
+ std::chrono::system_clock::time_point getExpiration() const;
+
+ /**
+ * Returns true if the certificate is marked as a Certificate Authority
+ * and has necessary key usage flags to sign certificates.
+ */
+ bool isCA() const;
+
+ /**
+ * PEM encoded certificate.
+ * If chain is true, the issuer chain will be included (default).
+ */
+ std::string toString(bool chain = true) const;
+
+ std::string print() const;
+
+ /**
+ * As a CA, revoke a certificate, adding it to
+ * the attached Certificate Revocation List (CRL)
+ */
+ void revoke(const PrivateKey&, const Certificate&);
+
+ /**
+ * Get the list of certificates revoked as a CA.
+ */
+ std::vector<std::shared_ptr<RevocationList>> getRevocationLists() const;
+
+ /**
+ * Attach existing revocation list.
+ */
+ void addRevocationList(RevocationList&&);
+ void addRevocationList(std::shared_ptr<RevocationList>);
+
+ static Certificate generate(const PrivateKey& key, const std::string& name = "dhtnode", Identity ca = {}, bool is_ca = false);
+
+ /** Return an independent copy of the certificate handle; caller takes ownership. */
+ gnutls_x509_crt_t getCopy() const {
+ if (not cert)
+ return nullptr;
+ auto copy = Certificate(getPacked());
+ gnutls_x509_crt_t ret = copy.cert;
+ copy.cert = nullptr;
+ return ret;
+ }
+
+ /** Certificate chain from this certificate up to the root, as raw handles (copies if copy=true). */
+ std::vector<gnutls_x509_crt_t>
+ getChain(bool copy = false) const
+ {
+ if (not cert)
+ return {};
+ std::vector<gnutls_x509_crt_t> crts;
+ for (auto c = this; c; c = c->issuer.get())
+ crts.emplace_back(copy ? c->getCopy() : c->cert);
+ return crts;
+ }
+
+ /** Certificate chain plus every attached CRL along the chain. */
+ std::pair<
+ std::vector<gnutls_x509_crt_t>,
+ std::vector<gnutls_x509_crl_t>
+ >
+ getChainWithRevocations(bool copy = false) const
+ {
+ if (not cert)
+ return {};
+ std::vector<gnutls_x509_crt_t> crts;
+ std::vector<gnutls_x509_crl_t> crls;
+ for (auto c = this; c; c = c->issuer.get()) {
+ crts.emplace_back(copy ? c->getCopy() : c->cert);
+ crls.reserve(crls.size() + c->revocation_lists.size());
+ for (const auto& crl : c->revocation_lists)
+ crls.emplace_back(copy ? crl->getCopy() : crl->get());
+ }
+ return {crts, crls};
+ }
+
+ gnutls_x509_crt_t cert {};
+ std::shared_ptr<Certificate> issuer {};
+private:
+ Certificate(const Certificate&) = delete;
+ Certificate& operator=(const Certificate&) = delete;
+
+ // Orders CRLs by their CRL-number extension so the set stays sorted by issue order.
+ struct crlNumberCmp {
+ bool operator() (const std::shared_ptr<RevocationList>& lhs, const std::shared_ptr<RevocationList>& rhs) const {
+ return lhs->getNumber() < rhs->getNumber();
+ }
+ };
+
+ std::set<std::shared_ptr<RevocationList>, crlNumberCmp> revocation_lists;
+};
+
+struct OPENDHT_PUBLIC TrustList
+{
+ /** Result of a certificate verification against the trust list. */
+ struct VerifyResult {
+ int ret; // gnutls return code; negative on error
+ unsigned result; // gnutls verification status flags
+ bool hasError() const { return ret < 0; }
+ bool isValid() const { return !hasError() and !(result & GNUTLS_CERT_INVALID); }
+ explicit operator bool() const { return isValid(); }
+ std::string toString() const;
+ OPENDHT_PUBLIC friend std::ostream& operator<< (std::ostream& s, const VerifyResult& h);
+ };
+
+ TrustList();
+ TrustList(TrustList&& o) : trust(std::move(o.trust)) {
+ o.trust = nullptr;
+ }
+ TrustList& operator=(TrustList&& o);
+ ~TrustList();
+ /** Add a trusted certificate (e.g. a CA). */
+ void add(const Certificate& crt);
+ /** Add a certificate revocation list. */
+ void add(const RevocationList& crl);
+ void remove(const Certificate& crt, bool parents = true);
+ /** Verify a certificate against the trusted certificates and CRLs. */
+ VerifyResult verify(const Certificate& crt) const;
+
+private:
+ TrustList(const TrustList& o) = delete;
+ TrustList& operator=(const TrustList& o) = delete;
+ gnutls_x509_trust_list_t trust;
+};
+
+template <class T>
+class OPENDHT_PUBLIC secure_vector
+{
+public:
+ secure_vector() {}
+ secure_vector(secure_vector<T> const&) = default;
+ secure_vector(secure_vector<T> &&) = default;
+ explicit secure_vector(unsigned size): data_(size) {}
+ explicit secure_vector(unsigned size, T _item): data_(size, _item) {}
+ explicit secure_vector(const std::vector<T>& c): data_(c) {}
+ secure_vector(std::vector<T>&& c): data_(std::move(c)) {}
+ // Wipe the memory before releasing it.
+ ~secure_vector() { clean(); }
+
+ /** Build a secure_vector of `size` elements filled with random bytes. */
+ static secure_vector<T> getRandom(size_t size) {
+ secure_vector<T> ret(size);
+ crypto::random_device rdev;
+#ifdef _WIN32
+ std::uniform_int_distribution<int> rand_byte{ 0, std::numeric_limits<uint8_t>::max() };
+#else
+ std::uniform_int_distribution<uint8_t> rand_byte;
+#endif
+ std::generate_n((uint8_t*)ret.data_.data(), ret.size()*sizeof(T), std::bind(rand_byte, std::ref(rdev)));
+ return ret;
+ }
+ secure_vector<T>& operator=(const secure_vector<T>& c) {
+ if (&c == this)
+ return *this;
+ clean();
+ data_ = c.data_;
+ return *this;
+ }
+ secure_vector<T>& operator=(secure_vector<T>&& c) {
+ if (&c == this)
+ return *this;
+ clean();
+ data_ = std::move(c.data_);
+ return *this;
+ }
+ secure_vector<T>& operator=(std::vector<T>&& c) {
+ clean();
+ data_ = std::move(c);
+ return *this;
+ }
+ // Wipes the current content before handing out a writable reference.
+ std::vector<T>& writable() { clean(); return data_; }
+ // Read access to the protected data; the content is NOT wiped or copied.
+ const std::vector<T>& makeInsecure() const { return data_; }
+ const uint8_t* data() const { return data_.data(); }
+
+ /** Securely wipe the whole buffer (size is unchanged). */
+ void clean() {
+ clean(data_.begin(), data_.end());
+ }
+
+ /** Securely wipe and empty the buffer. */
+ void clear() { clean(); data_.clear(); }
+
+ size_t size() const { return data_.size(); }
+ bool empty() const { return data_.empty(); }
+
+ void swap(secure_vector<T>& other) { data_.swap(other.data_); }
+ void resize(size_t s) {
+ if (s == data_.size()) return;
+ if (s < data_.size()) {
+ //shrink: wipe the discarded tail first
+ clean(data_.begin()+s, data_.end());
+ data_.resize(s);
+ } else {
+ //grow: reallocate, then wipe the old buffer
+ auto data = std::move(data_); // move protected data
+ clear();
+ data_.resize(s);
+ std::copy(data.begin(), data.end(), data_.begin());
+ clean(data.begin(), data.end());
+ }
+ }
+
+private:
+ /**
+ * Securely wipe the memory in [i, j).
+ * Writes through a volatile pointer so the zeroing is not optimized away.
+ */
+ static void clean(const typename std::vector<T>::iterator& i, const typename std::vector<T>::iterator& j) {
+ // Guard the empty range: dereferencing i (possibly begin() of an empty
+ // vector) or j (typically end()) would be undefined behavior.
+ if (i == j)
+ return;
+ volatile uint8_t* b = reinterpret_cast<uint8_t*>(&*i);
+ std::fill(b, b + (j - i) * sizeof(T), 0);
+ }
+
+ std::vector<T> data_;
+};
+
+using SecureBlob = secure_vector<uint8_t>;
+
+/**
+ * Generate an RSA key pair (4096 bits) and a certificate.
+ * @param name the name used in the generated certificate
+ * @param ca if set, the certificate authority that will sign the generated certificate.
+ * If not set, the generated certificate will be a self-signed CA.
+ * @param key_length strength of the generated private key (bits).
+ */
+OPENDHT_PUBLIC Identity generateIdentity(const std::string& name, Identity ca, unsigned key_length, bool is_ca);
+OPENDHT_PUBLIC Identity generateIdentity(const std::string& name = "dhtnode", Identity ca = {}, unsigned key_length = 4096);
+
+/** Same as generateIdentity, but with an elliptic-curve key pair. */
+OPENDHT_PUBLIC Identity generateEcIdentity(const std::string& name, Identity ca, bool is_ca);
+OPENDHT_PUBLIC Identity generateEcIdentity(const std::string& name = "dhtnode", Identity ca = {});
+
+
+/**
+ * Performs SHA512, SHA256 or SHA1, depending on hash_length.
+ * Attempts to choose a hash function with
+ * output size of at least hash_length bytes. Current implementation
+ * will use SHA1 for hash_length up to 20 bytes,
+ * will use SHA256 for hash_length up to 32 bytes,
+ * will use SHA512 for hash_length of 33 bytes and more.
+ */
+OPENDHT_PUBLIC Blob hash(const Blob& data, size_t hash_length = 512/8);
+
+/** Raw-buffer variant: hash data_length bytes from data into the hash buffer (hash_length bytes). */
+OPENDHT_PUBLIC void hash(const uint8_t* data, size_t data_length, uint8_t* hash, size_t hash_length);
+
+/**
+ * Generates an encryption key from a text password,
+ * making the key longer to bruteforce.
+ * The generated key also depends on a unique salt value of any size,
+ * that can be transmitted in clear, and will be generated if
+ * not provided (32 bytes).
+ */
+OPENDHT_PUBLIC Blob stretchKey(const std::string& password, Blob& salt, size_t key_length = 512/8);
+
+/**
+ * AES-GCM encryption. Key must be 128, 192 or 256 bits long (16, 24 or 32 bytes).
+ */
+OPENDHT_PUBLIC Blob aesEncrypt(const Blob& data, const Blob& key);
+OPENDHT_PUBLIC Blob aesEncrypt(const Blob& data, const std::string& password);
+
+/**
+ * AES-GCM decryption.
+ */
+OPENDHT_PUBLIC Blob aesDecrypt(const Blob& data, const Blob& key);
+OPENDHT_PUBLIC Blob aesDecrypt(const Blob& data, const std::string& password);
+
+}
+}
--- /dev/null
+#pragma once
+
+// Generic helper definitions for shared library support
+#if defined _WIN32 || defined __CYGWIN__
+ #define OPENDHT_IMPORT __declspec(dllimport)
+ #define OPENDHT_EXPORT __declspec(dllexport)
+ // No hidden-visibility equivalent on Windows: non-exported symbols are local by default.
+ #define OPENDHT_HIDDEN
+#else
+ #define OPENDHT_IMPORT __attribute__ ((visibility ("default")))
+ #define OPENDHT_EXPORT __attribute__ ((visibility ("default")))
+ #define OPENDHT_HIDDEN __attribute__ ((visibility ("hidden")))
+#endif
+
+// Now we use the generic helper definitions above to define OPENDHT_PUBLIC and OPENDHT_LOCAL.
+// OPENDHT_PUBLIC is used for the public API symbols. It either DLL imports or DLL exports (or does nothing for static build)
+// OPENDHT_LOCAL is used for non-api symbols.
+
+#ifdef opendht_EXPORTS // defined if OpenDHT is compiled as a shared library
+ #ifdef OPENDHT_BUILD // defined if we are building the OpenDHT shared library (instead of using it)
+ #define OPENDHT_PUBLIC OPENDHT_EXPORT
+ #else
+ #define OPENDHT_PUBLIC OPENDHT_IMPORT
+ #endif // OPENDHT_BUILD
+ #define OPENDHT_LOCAL OPENDHT_HIDDEN
+#else // opendht_EXPORTS is not defined: this means OpenDHT is a static lib.
+ #define OPENDHT_PUBLIC
+ #define OPENDHT_LOCAL
+#endif // opendht_EXPORTS
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "value.h"
+#include "sockaddr.h"
+
+namespace dht {
+// Instant-message status, carried in ImMessage::status.
+enum class ImStatus : uint8_t {
+ NONE = 0,
+ TYPING,
+ RECEIVED,
+ READ
+};
+}
+MSGPACK_ADD_ENUM(dht::ImStatus)
+
+namespace dht {
+
+class OPENDHT_PUBLIC DhtMessage : public Value::Serializable<DhtMessage>
+{
+public:
+ static const ValueType TYPE;
+
+ /**
+ * @param s name of the target service
+ * @param msg opaque message payload
+ * Both by-value parameters are moved in to avoid an extra copy.
+ */
+ DhtMessage(std::string s = {}, Blob msg = {}) : service(std::move(s)), data(std::move(msg)) {}
+
+ std::string getService() const {
+ return service;
+ }
+
+ static Value::Filter getFilter() { return {}; }
+
+ static bool storePolicy(InfoHash key, std::shared_ptr<Value>& value, const InfoHash& from, const SockAddr&);
+
+ /** Filter accepting only messages for the given service. */
+ static Value::Filter ServiceFilter(std::string s);
+
+ /** print value for debugging */
+ friend std::ostream& operator<< (std::ostream&, const DhtMessage&);
+
+ std::string service;
+ Blob data;
+ MSGPACK_DEFINE(service, data)
+};
+
+template <typename T>
+class OPENDHT_PUBLIC SignedValue : public Value::Serializable<T>
+{
+private:
+ using BaseClass = Value::Serializable<T>;
+
+public:
+ /** Deserialize from a Value, also capturing the signer's id when the value has an owner. */
+ virtual void unpackValue(const Value& v) override {
+ if (v.owner)
+ from = v.owner->getId();
+ BaseClass::unpackValue(v);
+ }
+
+ /** Filter accepting only cryptographically signed values. */
+ static Value::Filter getFilter() {
+ return [](const Value& v){ return v.isSigned(); };
+ }
+
+ // Id of the value's signer, filled in by unpackValue.
+ dht::InfoHash from;
+};
+
+template <typename T>
+class OPENDHT_PUBLIC EncryptedValue : public SignedValue<T>
+{
+public:
+ using BaseClass = SignedValue<T>;
+
+public:
+ /** Deserialize from a Value, also capturing the recipient's id. */
+ virtual void unpackValue(const Value& v) override {
+ to = v.recipient;
+ BaseClass::unpackValue(v);
+ }
+
+ /** Filter accepting only signed values that have a recipient set. */
+ static Value::Filter getFilter() {
+ return Value::Filter::chain(
+ BaseClass::getFilter(),
+ [](const Value& v) { return static_cast<bool>(v.recipient); }
+ );
+ }
+
+ // Id of the value's recipient, filled in by unpackValue.
+ dht::InfoHash to;
+};
+
+
+
+
+class OPENDHT_PUBLIC ImMessage : public SignedValue<ImMessage>
+{
+private:
+ using BaseClass = SignedValue<ImMessage>;
+
+public:
+ static const ValueType TYPE;
+
+ ImMessage() {}
+ /**
+ * @param id unique message id
+ * @param m message body (moved in)
+ * @param d sender-provided date
+ */
+ ImMessage(dht::Value::Id id, std::string&& m, long d = 0)
+ : id(id), msg(std::move(m)), date(d) {}
+ ImMessage(dht::Value::Id id, std::string&& dt, std::string&& m, long d = 0)
+ : id(id), msg(std::move(m)), datatype(std::move(dt)), date(d) {}
+
+ virtual void unpackValue(const Value& v) override {
+ to = v.recipient;
+ // Use the BaseClass alias declared above, consistently with the other value types.
+ BaseClass::unpackValue(v);
+ }
+
+ dht::InfoHash to;
+ dht::Value::Id id {0};
+ std::string msg;
+ std::string datatype;
+ long date {0};
+ ImStatus status {ImStatus::NONE};
+
+ MSGPACK_DEFINE_MAP(id, msg, date, status, datatype)
+};
+
+class OPENDHT_PUBLIC TrustRequest : public EncryptedValue<TrustRequest>
+{
+private:
+ using BaseClass = EncryptedValue<TrustRequest>;
+
+public:
+ static const ValueType TYPE;
+
+ TrustRequest() {}
+ /**
+ * @param s name of the service the request is for (moved in to avoid a copy)
+ * @param d optional request payload
+ */
+ TrustRequest(std::string s) : service(std::move(s)) {}
+ TrustRequest(std::string s, const Blob& d) : service(std::move(s)), payload(d) {}
+
+ static Value::Filter getFilter() {
+ // Use the BaseClass alias declared above, consistently with SignedValue.
+ return BaseClass::getFilter();
+ }
+
+ std::string service;
+ Blob payload;
+ bool confirm {false};
+ MSGPACK_DEFINE_MAP(service, payload, confirm)
+};
+
+class OPENDHT_PUBLIC IceCandidates : public EncryptedValue<IceCandidates>
+{
+private:
+ using BaseClass = EncryptedValue<IceCandidates>;
+
+public:
+ static const ValueType TYPE;
+
+ IceCandidates() {}
+ /**
+ * @param msg_id id of the exchange
+ * @param ice raw ICE candidate payload (moved in to avoid a copy)
+ */
+ IceCandidates(Value::Id msg_id, Blob ice) : id(msg_id), ice_data(std::move(ice)) {}
+
+ static Value::Filter getFilter() {
+ // Use the BaseClass alias declared above, consistently with SignedValue.
+ return BaseClass::getFilter();
+ }
+
+ /** Msgpack support: packs [id, ice_data] as a 2-element array. */
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const
+ {
+ pk.pack_array(2);
+ pk.pack(id);
+#if 1
+ pk.pack_bin(ice_data.size());
+ pk.pack_bin_body((const char*)ice_data.data(), ice_data.size());
+#else
+ // hack for backward compatibility with old opendht compiled with msgpack 1.0
+ // remove when enough people have moved to new versions
+ pk.pack_array(ice_data.size());
+ for (uint8_t b : ice_data)
+ pk.pack(b);
+#endif
+ }
+
+ virtual void msgpack_unpack(msgpack::object o)
+ {
+ if (o.type != msgpack::type::ARRAY) throw msgpack::type_error();
+ if (o.via.array.size < 2) throw msgpack::type_error();
+ id = o.via.array.ptr[0].as<Value::Id>();
+ ice_data = unpackBlob(o.via.array.ptr[1]);
+ }
+
+ Value::Id id {0};
+ Blob ice_data;
+};
+
+/* "Peer" announcement
+ */
+class OPENDHT_PUBLIC IpServiceAnnouncement : public Value::Serializable<IpServiceAnnouncement>
+{
+private:
+ using BaseClass = Value::Serializable<IpServiceAnnouncement>;
+
+public:
+ static const ValueType TYPE;
+
+ /** Announce a port for the given address family (address to be filled by the receiver). */
+ IpServiceAnnouncement(sa_family_t family = AF_UNSPEC, in_port_t p = 0) {
+ addr.setFamily(family);
+ addr.setPort(p);
+ }
+
+ IpServiceAnnouncement(const SockAddr& sa) : addr(sa) {}
+
+ /** Deserialize from a packed blob. */
+ IpServiceAnnouncement(const Blob& b) {
+ msgpack_unpack(unpackMsg(b).get());
+ }
+
+ /** Msgpack support: packs the raw sockaddr bytes as one binary blob. */
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const
+ {
+ pk.pack_bin(addr.getLength());
+ pk.pack_bin_body((const char*)addr.get(), addr.getLength());
+ }
+
+ virtual void msgpack_unpack(msgpack::object o)
+ {
+ if (o.type == msgpack::type::BIN)
+ addr = {(sockaddr*)o.via.bin.ptr, (socklen_t)o.via.bin.size};
+ else
+ throw msgpack::type_error();
+ }
+
+ in_port_t getPort() const {
+ return addr.getPort();
+ }
+ void setPort(in_port_t p) {
+ addr.setPort(p);
+ }
+
+ const SockAddr& getPeerAddr() const {
+ return addr;
+ }
+
+ virtual const ValueType& getType() const {
+ return TYPE;
+ }
+
+ static bool storePolicy(InfoHash, std::shared_ptr<Value>&, const InfoHash&, const SockAddr&);
+
+ /** print value for debugging */
+ friend std::ostream& operator<< (std::ostream&, const IpServiceAnnouncement&);
+
+private:
+ SockAddr addr;
+};
+
+
+OPENDHT_PUBLIC extern const std::array<std::reference_wrapper<const ValueType>, 5> DEFAULT_TYPES;
+
+OPENDHT_PUBLIC extern const std::array<std::reference_wrapper<const ValueType>, 1> DEFAULT_INSECURE_TYPES;
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Authors: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h"
+#include "value.h"
+#include "utils.h"
+#include "network_engine.h"
+#include "scheduler.h"
+#include "routing_table.h"
+#include "callbacks.h"
+#include "dht_interface.h"
+
+#include <string>
+#include <array>
+#include <vector>
+#include <map>
+#include <functional>
+#include <memory>
+
+#ifdef _WIN32
+#include <iso646.h>
+#endif
+
+namespace dht {
+
+namespace net {
+struct Request;
+} /* namespace net */
+
+struct Storage;
+struct ValueStorage;
+class StorageBucket;
+struct Listener;
+struct LocalListener;
+
+/**
+ * Main Dht class.
+ * Provides a Distributed Hash Table node.
+ *
+ * Must be given open UDP sockets and ::periodic must be
+ * called regularly.
+ */
+class OPENDHT_PUBLIC Dht final : public DhtInterface {
+public:
+
+ Dht();
+
+ /**
+ * Initialise the Dht with two open sockets (for IPv4 and IP6)
+ * and an ID for the node.
+ */
+ Dht(const int& s, const int& s6, Config config);
+ virtual ~Dht();
+
+ /**
+ * Get the ID of the node.
+ */
+ inline const InfoHash& getNodeId() const { return myid; }
+
+ /**
+ * Get the current status of the node for the given family.
+ */
+ NodeStatus getStatus(sa_family_t af) const;
+
+ NodeStatus getStatus() const {
+ return std::max(getStatus(AF_INET), getStatus(AF_INET6));
+ }
+
+ /**
+ * Performs final operations before quitting.
+ */
+ void shutdown(ShutdownCallback cb);
+
+ /**
+ * Returns true if the node is running (have access to an open socket).
+ *
+ * af: address family. If non-zero, will return true if the node
+ * is running for the provided family.
+ */
+ bool isRunning(sa_family_t af = 0) const;
+
+ virtual void registerType(const ValueType& type) {
+ types.registerType(type);
+ }
+ const ValueType& getType(ValueType::Id type_id) const {
+ return types.getType(type_id);
+ }
+
+ /**
+ * Insert a node in the main routing table.
+ * The node is not pinged, so this should be
+ * used to bootstrap efficiently from previously known nodes.
+ */
+ void insertNode(const InfoHash& id, const SockAddr&);
+ void insertNode(const InfoHash& id, const sockaddr* sa, socklen_t salen) {
+ insertNode(id, SockAddr(sa, salen));
+ }
+ void insertNode(const NodeExport& n) {
+ insertNode(n.id, SockAddr(n.ss, n.sslen));
+ }
+
+ void pingNode(const sockaddr*, socklen_t, DoneCallbackSimple&& cb={});
+
+ time_point periodic(const uint8_t *buf, size_t buflen, const SockAddr&);
+ time_point periodic(const uint8_t *buf, size_t buflen, const sockaddr* from, socklen_t fromlen) {
+ return periodic(buf, buflen, SockAddr(from, fromlen));
+ }
+
+ /**
+ * Get a value by searching on all available protocols (IPv4, IPv6),
+ * and call the provided get callback when values are found at key.
+ * The operation will start as soon as the node is connected to the network.
+ * @param cb a function called when new values are found on the network.
+ * It should return false to stop the operation.
+ * @param donecb a function called when the operation is complete.
+ cb and donecb won't be called again afterward.
+ * @param f a filter function used to prefilter values.
+ */
+ virtual void get(const InfoHash& key, GetCallback cb, DoneCallback donecb={}, Value::Filter&& f={}, Where&& w = {});
+ virtual void get(const InfoHash& key, GetCallback cb, DoneCallbackSimple donecb={}, Value::Filter&& f={}, Where&& w = {}) {
+ get(key, cb, bindDoneCb(donecb), std::forward<Value::Filter>(f), std::forward<Where>(w));
+ }
+ virtual void get(const InfoHash& key, GetCallbackSimple cb, DoneCallback donecb={}, Value::Filter&& f={}, Where&& w = {}) {
+ get(key, bindGetCb(cb), donecb, std::forward<Value::Filter>(f), std::forward<Where>(w));
+ }
+ virtual void get(const InfoHash& key, GetCallbackSimple cb, DoneCallbackSimple donecb, Value::Filter&& f={}, Where&& w = {}) {
+ get(key, bindGetCb(cb), bindDoneCb(donecb), std::forward<Value::Filter>(f), std::forward<Where>(w));
+ }
+ /**
+ * Similar to Dht::get, but sends a Query to filter data remotely.
+ * @param key the key for which to query data for.
+ * @param cb a function called when new values are found on the network.
+ * It should return false to stop the operation.
+ * @param done_cb a function called when the operation is complete.
+ cb and done_cb won't be called again afterward.
+ * @param q a query used to filter values on the remotes before they send a
+ * response.
+ */
+ virtual void query(const InfoHash& key, QueryCallback cb, DoneCallback done_cb = {}, Query&& q = {});
+ virtual void query(const InfoHash& key, QueryCallback cb, DoneCallbackSimple done_cb = {}, Query&& q = {}) {
+ query(key, cb, bindDoneCb(done_cb), std::forward<Query>(q));
+ }
+
+ /**
+ * Get locally stored data for the given hash.
+ */
+ std::vector<Sp<Value>> getLocal(const InfoHash& key, Value::Filter f = Value::AllFilter()) const;
+
+ /**
+ * Get locally stored data for the given key and value id.
+ */
+ Sp<Value> getLocalById(const InfoHash& key, Value::Id vid) const;
+
+ /**
+ * Announce a value on all available protocols (IPv4, IPv6).
+ *
+ * The operation will start as soon as the node is connected to the network.
+ * The done callback will be called once, when the first announce succeeds, or fails.
+ */
+ void put(const InfoHash& key,
+ Sp<Value>,
+ DoneCallback cb=nullptr,
+ time_point created=time_point::max(),
+ bool permanent = false);
+ void put(const InfoHash& key,
+ const Sp<Value>& v,
+ DoneCallbackSimple cb,
+ time_point created=time_point::max(),
+ bool permanent = false)
+ {
+ put(key, v, bindDoneCb(cb), created, permanent);
+ }
+
+ void put(const InfoHash& key,
+ Value&& v,
+ DoneCallback cb=nullptr,
+ time_point created=time_point::max(),
+ bool permanent = false)
+ {
+ put(key, std::make_shared<Value>(std::move(v)), cb, created, permanent);
+ }
+ void put(const InfoHash& key,
+ Value&& v,
+ DoneCallbackSimple cb,
+ time_point created=time_point::max(),
+ bool permanent = false)
+ {
+ put(key, std::forward<Value>(v), bindDoneCb(cb), created, permanent);
+ }
+
+ /**
+ * Get data currently being put at the given hash.
+ */
+ std::vector<Sp<Value>> getPut(const InfoHash&);
+
+ /**
+ * Get data currently being put at the given hash with the given id.
+ */
+ Sp<Value> getPut(const InfoHash&, const Value::Id&);
+
+ /**
+ * Stop any put/announce operation at the given location,
+ * for the value with the given id.
+ */
+ bool cancelPut(const InfoHash&, const Value::Id&);
+
+ /**
+ * Listen on the network for any changes involving a specified hash.
+ * The node will register to receive updates from relevent nodes when
+ * new values are added or removed.
+ *
+ * @return a token to cancel the listener later.
+ */
+ virtual size_t listen(const InfoHash&, ValueCallback, Value::Filter={}, Where={});
+
+ virtual size_t listen(const InfoHash& key, GetCallback cb, Value::Filter f={}, Where w={}) {
+ return listen(key, [cb](const std::vector<Sp<Value>>& vals, bool expired){
+ if (not expired)
+ return cb(vals);
+ return true;
+ }, std::forward<Value::Filter>(f), std::forward<Where>(w));
+ }
+ virtual size_t listen(const InfoHash& key, GetCallbackSimple cb, Value::Filter f={}, Where w={}) {
+ return listen(key, bindGetCb(cb), std::forward<Value::Filter>(f), std::forward<Where>(w));
+ }
+
+ virtual bool cancelListen(const InfoHash&, size_t token);
+
+ /**
+ * Inform the DHT of lower-layer connectivity changes.
+ * This will cause the DHT to assume a public IP address change.
+ * The DHT will recontact neighbor nodes, re-register for listen ops etc.
+ */
+ void connectivityChanged(sa_family_t);
+ void connectivityChanged() {
+ reported_addr.clear();
+ connectivityChanged(AF_INET);
+ connectivityChanged(AF_INET6);
+ }
+
+ /**
+ * Get the list of good nodes for local storage saving purposes
+ * The list is ordered to minimize the back-to-work delay.
+ */
+ std::vector<NodeExport> exportNodes();
+
+ std::vector<ValuesExport> exportValues() const;
+ void importValues(const std::vector<ValuesExport>&);
+
+ NodeStats getNodesStats(sa_family_t af) const;
+
+ std::string getStorageLog() const;
+ std::string getStorageLog(const InfoHash&) const;
+
+ std::string getRoutingTablesLog(sa_family_t) const;
+ std::string getSearchesLog(sa_family_t) const;
+ std::string getSearchLog(const InfoHash&, sa_family_t af = AF_UNSPEC) const;
+
+ void dumpTables() const;
+ std::vector<unsigned> getNodeMessageStats(bool in = false) {
+ return network_engine.getNodeMessageStats(in);
+ }
+
+ /**
+ * Set the in-memory storage limit in bytes
+ */
+ void setStorageLimit(size_t limit = DEFAULT_STORAGE_LIMIT) {
+ max_store_size = limit;
+ }
+
+ /**
+ * Returns the total memory usage of stored values and the number
+ * of stored values.
+ */
+ std::pair<size_t, size_t> getStoreSize() const {
+ return {total_store_size, total_values};
+ }
+
+ std::vector<SockAddr> getPublicAddress(sa_family_t family = 0);
+
+ // Intentional no-ops: push notifications and listen resubscription are only
+ // meaningful for proxy-based implementations (see DhtProxyClient), not for a
+ // plain DHT node.
+ void pushNotificationReceived(const std::map<std::string, std::string>&) {}
+ void resubscribe(unsigned) {}
+
+private:
+
+ /* When performing a search, we search for up to SEARCH_NODES closest nodes
+ to the destination, and use the additional ones to backtrack if any of
+ the target 8 turn out to be dead. */
+ static constexpr unsigned SEARCH_NODES {14};
+
+ /* The number of bad nodes is limited in order to help determine
+ * presence of connectivity changes. See
+ * https://github.com/savoirfairelinux/opendht/issues/137 for details.
+ *
+ * According to the tables, 25 is a good average value for big networks. If
+ * the network is small, normal search expiration process will handle the
+ * situation.
+ * */
+ static constexpr unsigned SEARCH_MAX_BAD_NODES {25};
+
+ /* Concurrent search nodes requested count */
+ static constexpr unsigned MAX_REQUESTED_SEARCH_NODES {4};
+
+ /* Number of listening nodes */
+ static constexpr unsigned LISTEN_NODES {4};
+
+ /* The maximum number of hashes we're willing to track. */
+ static constexpr unsigned MAX_HASHES {64 * 1024};
+
+ /* The maximum number of searches we keep data about. */
+ static constexpr unsigned MAX_SEARCHES {64 * 1024};
+
+ static constexpr std::chrono::minutes MAX_STORAGE_MAINTENANCE_EXPIRE_TIME {10};
+
+ /* The time after which we consider a search to be expirable. */
+ static constexpr std::chrono::minutes SEARCH_EXPIRE_TIME {62};
+
+ /* Timeout for listen */
+ static constexpr std::chrono::seconds LISTEN_EXPIRE_TIME {30};
+
+ static constexpr std::chrono::seconds REANNOUNCE_MARGIN {10};
+
+ static constexpr size_t TOKEN_SIZE {32};
+
+ // internal structures
+ struct SearchNode;
+ struct Get;
+ struct Announce;
+ struct Search;
+
+ // prevent copy
+ Dht(const Dht&) = delete;
+ Dht& operator=(const Dht&) = delete;
+
+ InfoHash myid {};
+
+ uint64_t secret {};
+ uint64_t oldsecret {};
+
+ // registered value types
+ TypeStore types;
+
+ // are we a bootstrap node ?
+ // note: Any running node can be used as a bootstrap node.
+ // Only nodes running only as bootstrap nodes should
+ // be put in bootstrap mode.
+ const bool is_bootstrap {false};
+ const bool maintain_storage {false};
+
+ // the stuff
+ RoutingTable buckets4 {};
+ RoutingTable buckets6 {};
+
+ std::map<InfoHash, Storage> store;
+ std::map<SockAddr, StorageBucket, SockAddr::ipCmp> store_quota;
+ size_t total_values {0};
+ size_t total_store_size {0};
+ size_t max_store_size {DEFAULT_STORAGE_LIMIT};
+
+ using SearchMap = std::map<InfoHash, Sp<Search>>;
+ SearchMap searches4 {};
+ SearchMap searches6 {};
+ uint16_t search_id {0};
+
+ // map a global listen token to IPv4, IPv6 specific listen tokens.
+ // 0 is the invalid token.
+ std::map<size_t, std::tuple<size_t, size_t, size_t>> listeners {};
+ size_t listener_token {1};
+
+ // timing
+ Scheduler scheduler;
+ Sp<Scheduler::Job> nextNodesConfirmation {};
+ Sp<Scheduler::Job> nextStorageMaintenance {};
+
+ net::NetworkEngine network_engine;
+ unsigned pending_pings4 {0};
+ unsigned pending_pings6 {0};
+
+ using ReportedAddr = std::pair<unsigned, SockAddr>;
+ std::vector<ReportedAddr> reported_addr;
+
+ std::mt19937_64 rd {crypto::getSeededRandomEngine<std::mt19937_64>()};
+
+ void rotateSecrets();
+
+ Blob makeToken(const SockAddr&, bool old) const;
+ bool tokenMatch(const Blob& token, const SockAddr&) const;
+
+ void reportedAddr(const SockAddr&);
+
+ // Storage
+ void storageAddListener(const InfoHash& id, const Sp<Node>& node, size_t tid, Query&& = {});
+ bool storageStore(const InfoHash& id, const Sp<Value>& value, time_point created, const SockAddr& sa = {}, bool permanent = false);
+ bool storageErase(const InfoHash& id, Value::Id vid);
+ bool storageRefresh(const InfoHash& id, Value::Id vid);
+ void expireStore();
+ void expireStorage(InfoHash h);
+ void expireStore(decltype(store)::iterator);
+
+ void storageChanged(const InfoHash& id, Storage& st, ValueStorage&, bool newValue);
+ std::string printStorageLog(const decltype(store)::value_type&) const;
+
+ /**
+ * For a given storage, if values don't belong there anymore because this
+ * node is too far from the target, values are sent to the appropriate
+ * nodes.
+ */
+ void dataPersistence(InfoHash id);
+ size_t maintainStorage(decltype(store)::value_type&, bool force=false, DoneCallback donecb=nullptr);
+
+ // Buckets
+ // Routing-table accessors: AF_INET selects buckets4, any other family selects buckets6.
+ RoutingTable& buckets(sa_family_t af) { return af == AF_INET ? buckets4 : buckets6; }
+ const RoutingTable& buckets(sa_family_t af) const { return af == AF_INET ? buckets4 : buckets6; }
+    /** Locate the routing-table bucket covering `id` for the given family.
+     *  @return pointer to the bucket, or nullptr when the family is not
+     *          AF_INET/AF_INET6 or no bucket is found. */
+    Bucket* findBucket(const InfoHash& id, sa_family_t af) {
+        RoutingTable* table;
+        if (af == AF_INET)
+            table = &buckets4;
+        else if (af == AF_INET6)
+            table = &buckets6;
+        else
+            return nullptr;
+        auto it = table->findBucket(id);
+        return it == table->end() ? nullptr : &(*it);
+    }
+ // Const overload: delegates to the non-const version (via const_cast) to avoid
+ // duplicating the lookup logic.
+ const Bucket* findBucket(const InfoHash& id, sa_family_t af) const {
+ return const_cast<Dht*>(this)->findBucket(id, af);
+ }
+
+ void expireBuckets(RoutingTable&);
+ void sendCachedPing(Bucket& b);
+ bool bucketMaintenance(RoutingTable&);
+ void dumpBucket(const Bucket& b, std::ostream& out) const;
+
+ // Nodes
+ void onNewNode(const Sp<Node>& node, int confirm);
+ const Sp<Node> findNode(const InfoHash& id, sa_family_t af) const;
+ bool trySearchInsert(const Sp<Node>& node);
+
+ // Searches
+
+ // Search-map accessors, mirroring buckets(): AF_INET selects searches4, any other family selects searches6.
+ inline SearchMap& searches(sa_family_t af) { return af == AF_INET ? searches4 : searches6; }
+ inline const SearchMap& searches(sa_family_t af) const { return af == AF_INET ? searches4 : searches6; }
+
+ /**
+ * Low-level method that will perform a search on the DHT for the specified
+ * infohash (id), using the specified IP version (IPv4 or IPv6).
+ */
+ Sp<Search> search(const InfoHash& id, sa_family_t af, GetCallback = {}, QueryCallback = {}, DoneCallback = {}, Value::Filter = {}, const Sp<Query>& q = {});
+
+ void announce(const InfoHash& id, sa_family_t af, Sp<Value> value, DoneCallback callback, time_point created=time_point::max(), bool permanent = false);
+ size_t listenTo(const InfoHash& id, sa_family_t af, ValueCallback cb, Value::Filter f = Value::AllFilter(), const Sp<Query>& q = {});
+
+ /**
+ * Refill the search with good nodes if possible.
+ *
+ * @param sr The search to refill.
+ *
+ * @return the number inserted nodes.
+ */
+ unsigned refill(Search& sr);
+ void expireSearches();
+
+ void confirmNodes();
+ void expire();
+
+ /**
+ * Generic function to execute when a 'get' request has completed.
+ *
+ * @param status The request passed by the network engine.
+ * @param answer The answer from the network engine.
+ * @param ws A weak pointer to the search concerned by the request.
+ * @param query The query sent to the node.
+ */
+ void searchNodeGetDone(const net::Request& status,
+ net::RequestAnswer&& answer,
+ std::weak_ptr<Search> ws,
+ Sp<Query> query);
+
+ /**
+ * Generic function to execute when a 'get' request expires.
+ *
+ * @param status The request passed by the network engine.
+ * @param over Whether we're done to try sending the request to the node
+ * or not. This lets us mark a node as candidate.
+ * @param ws A weak pointer to the search concerned by the request.
+ * @param query The query sent to the node.
+ */
+ void searchNodeGetExpired(const net::Request& status, bool over, std::weak_ptr<Search> ws, Sp<Query> query);
+
+ /**
+ * This method recovers by sending individual requests for values, one per value id.
+ *
+ * @param ws A weak pointer to the Search.
+ * @param query The initial query passed through the API.
+ * @param n The node to which send the requests.
+ */
+ void paginate(std::weak_ptr<Search> ws, Sp<Query> query, SearchNode* n);
+
+ /**
+ * If update is true, this method will also send message to synced but non-updated search nodes.
+ */
+ SearchNode* searchSendGetValues(Sp<Search> sr, SearchNode *n = nullptr, bool update = true);
+
+ /**
+ * Forwards an 'announce' request for a list of nodes to the network engine.
+ *
+ * @param sr The search for which we want to announce a value.
+ * @param announce The 'announce' structure.
+ */
+ void searchSendAnnounceValue(const Sp<Search>& sr);
+
+ /**
+ * Main process of a Search's operations. This function will demand the
+ * network engine to send requests packets for all pending operations
+ * ('get', 'put' and 'listen').
+ *
+ * @param sr The search to execute its operations.
+ */
+ void searchStep(Sp<Search>);
+ void searchSynchedNodeListen(const Sp<Search>&, SearchNode&);
+
+ void dumpSearch(const Search& sr, std::ostream& out) const;
+
+ bool neighbourhoodMaintenance(RoutingTable&);
+
+ void onError(Sp<net::Request> node, net::DhtProtocolException e);
+ /* when our address is reported by a distant peer. */
+ void onReportedAddr(const InfoHash& id, const SockAddr&);
+ /* when we receive a ping request */
+ net::RequestAnswer onPing(Sp<Node> node);
+ /* when we receive a "find node" request */
+ net::RequestAnswer onFindNode(Sp<Node> node, const InfoHash& hash, want_t want);
+ void onFindNodeDone(const Sp<Node>& status,
+ net::RequestAnswer& a,
+ Sp<Search> sr);
+ /* when we receive a "get values" request */
+ net::RequestAnswer onGetValues(Sp<Node> node,
+ const InfoHash& hash,
+ want_t want,
+ const Query& q);
+ void onGetValuesDone(const Sp<Node>& status,
+ net::RequestAnswer& a,
+ Sp<Search>& sr,
+ const Sp<Query>& orig_query);
+ /* when we receive a listen request */
+ net::RequestAnswer onListen(Sp<Node> node,
+ const InfoHash& hash,
+ const Blob& token,
+ size_t socket_id,
+ const Query& query);
+ void onListenDone(const Sp<Node>& status,
+ net::RequestAnswer& a,
+ Sp<Search>& sr);
+ /* when we receive an announce request */
+ net::RequestAnswer onAnnounce(Sp<Node> node,
+ const InfoHash& hash,
+ const Blob& token,
+ const std::vector<Sp<Value>>& v,
+ const time_point& created);
+ net::RequestAnswer onRefresh(Sp<Node> node,
+ const InfoHash& hash,
+ const Blob& token,
+ const Value::Id& vid);
+ void onAnnounceDone(const Sp<Node>& status,
+ net::RequestAnswer& a,
+ Sp<Search>& sr);
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h"
+#include "log_enable.h"
+
+namespace dht {
+
+/**
+ * Abstract interface implemented by DHT nodes (plain node, proxy client...).
+ * Pure-virtual operations plus a few inline logging helpers shared by all
+ * implementations.
+ */
+class OPENDHT_PUBLIC DhtInterface {
+public:
+    DhtInterface() = default;
+    virtual ~DhtInterface() = default;
+
+    // [[deprecated]]
+    using Status = NodeStatus;
+    // [[deprecated]]
+    using NodeExport = dht::NodeExport;
+
+    /**
+     * Get the current status of the node for the given family.
+     */
+    virtual NodeStatus getStatus(sa_family_t af) const = 0;
+    virtual NodeStatus getStatus() const = 0;
+
+    /**
+     * Get the ID of the DHT node.
+     */
+    virtual const InfoHash& getNodeId() const = 0;
+
+    /**
+     * Performs final operations before quitting.
+     */
+    virtual void shutdown(ShutdownCallback cb) = 0;
+
+    /**
+     * Returns true if the node is running (have access to an open socket).
+     *
+     * af: address family. If non-zero, will return true if the node
+     * is running for the provided family.
+     */
+    virtual bool isRunning(sa_family_t af = 0) const = 0;
+
+    /** Register a custom value type. */
+    virtual void registerType(const ValueType& type) = 0;
+
+    /** Get a registered value type by id. */
+    virtual const ValueType& getType(ValueType::Id type_id) const = 0;
+
+    /**
+     * Insert a node in the main routing table.
+     * The node is not pinged, so this should be
+     * used to bootstrap efficiently from previously known nodes.
+     */
+    virtual void insertNode(const InfoHash& id, const SockAddr&) = 0;
+    virtual void insertNode(const InfoHash& id, const sockaddr* sa, socklen_t salen) = 0;
+    virtual void insertNode(const NodeExport& n) = 0;
+
+    virtual void pingNode(const sockaddr*, socklen_t, DoneCallbackSimple&& cb={}) = 0;
+
+    virtual time_point periodic(const uint8_t *buf, size_t buflen, const SockAddr&) = 0;
+    virtual time_point periodic(const uint8_t *buf, size_t buflen, const sockaddr* from, socklen_t fromlen) = 0;
+
+    /**
+     * Get a value by searching on all available protocols (IPv4, IPv6),
+     * and call the provided get callback when values are found at key.
+     * The operation will start as soon as the node is connected to the network.
+     * @param cb a function called when new values are found on the network.
+     *           It should return false to stop the operation.
+     * @param donecb a function called when the operation is complete.
+     *               cb and donecb won't be called again afterward.
+     * @param f a filter function used to prefilter values.
+     */
+    virtual void get(const InfoHash& key, GetCallback cb, DoneCallback donecb={}, Value::Filter&& f={}, Where&& w = {}) = 0;
+    virtual void get(const InfoHash& key, GetCallback cb, DoneCallbackSimple donecb={}, Value::Filter&& f={}, Where&& w = {}) = 0;
+    virtual void get(const InfoHash& key, GetCallbackSimple cb, DoneCallback donecb={}, Value::Filter&& f={}, Where&& w = {}) = 0;
+    virtual void get(const InfoHash& key, GetCallbackSimple cb, DoneCallbackSimple donecb, Value::Filter&& f={}, Where&& w = {}) = 0;
+
+    /**
+     * Similar to Dht::get, but sends a Query to filter data remotely.
+     * @param key the key for which to query data for.
+     * @param cb a function called when new values are found on the network.
+     *           It should return false to stop the operation.
+     * @param done_cb a function called when the operation is complete.
+     *                cb and done_cb won't be called again afterward.
+     * @param q a query used to filter values on the remotes before they send a
+     *          response.
+     */
+    virtual void query(const InfoHash& key, QueryCallback cb, DoneCallback done_cb = {}, Query&& q = {}) = 0;
+    virtual void query(const InfoHash& key, QueryCallback cb, DoneCallbackSimple done_cb = {}, Query&& q = {}) = 0;
+
+    /**
+     * Get locally stored data for the given hash.
+     */
+    virtual std::vector<Sp<Value>> getLocal(const InfoHash& key, Value::Filter f = Value::AllFilter()) const = 0;
+
+    /**
+     * Get locally stored data for the given key and value id.
+     */
+    virtual Sp<Value> getLocalById(const InfoHash& key, Value::Id vid) const = 0;
+
+    /**
+     * Announce a value on all available protocols (IPv4, IPv6).
+     *
+     * The operation will start as soon as the node is connected to the network.
+     * The done callback will be called once, when the first announce succeeds, or fails.
+     */
+    virtual void put(const InfoHash& key,
+            Sp<Value>,
+            DoneCallback cb=nullptr,
+            time_point created=time_point::max(),
+            bool permanent = false) = 0;
+    virtual void put(const InfoHash& key,
+            const Sp<Value>& v,
+            DoneCallbackSimple cb,
+            time_point created=time_point::max(),
+            bool permanent = false) = 0;
+    virtual void put(const InfoHash& key,
+            Value&& v,
+            DoneCallback cb=nullptr,
+            time_point created=time_point::max(),
+            bool permanent = false) = 0;
+    virtual void put(const InfoHash& key,
+            Value&& v,
+            DoneCallbackSimple cb,
+            time_point created=time_point::max(),
+            bool permanent = false) = 0;
+
+    /**
+     * Get data currently being put at the given hash.
+     */
+    virtual std::vector<Sp<Value>> getPut(const InfoHash&) = 0;
+
+    /**
+     * Get data currently being put at the given hash with the given id.
+     */
+    virtual Sp<Value> getPut(const InfoHash&, const Value::Id&) = 0;
+
+    /**
+     * Stop any put/announce operation at the given location,
+     * for the value with the given id.
+     */
+    virtual bool cancelPut(const InfoHash&, const Value::Id&) = 0;
+
+    /**
+     * Listen on the network for any changes involving a specified hash.
+     * The node will register to receive updates from relevant nodes when
+     * new values are added or removed.
+     *
+     * @return a token to cancel the listener later.
+     */
+    virtual size_t listen(const InfoHash&, GetCallback, Value::Filter={}, Where w = {}) = 0;
+    virtual size_t listen(const InfoHash& key, GetCallbackSimple cb, Value::Filter f={}, Where w = {}) = 0;
+    virtual size_t listen(const InfoHash&, ValueCallback, Value::Filter={}, Where w = {}) = 0;
+
+    virtual bool cancelListen(const InfoHash&, size_t token) = 0;
+
+    /**
+     * Inform the DHT of lower-layer connectivity changes.
+     * This will cause the DHT to assume a public IP address change.
+     * The DHT will recontact neighbor nodes, re-register for listen ops etc.
+     */
+    virtual void connectivityChanged(sa_family_t) = 0;
+    virtual void connectivityChanged() = 0;
+
+    /**
+     * Get the list of good nodes for local storage saving purposes
+     * The list is ordered to minimize the back-to-work delay.
+     */
+    virtual std::vector<NodeExport> exportNodes() = 0;
+
+    virtual std::vector<ValuesExport> exportValues() const = 0;
+    virtual void importValues(const std::vector<ValuesExport>&) = 0;
+
+    virtual NodeStats getNodesStats(sa_family_t af) const = 0;
+
+    virtual std::string getStorageLog() const = 0;
+    virtual std::string getStorageLog(const InfoHash&) const = 0;
+
+    virtual std::string getRoutingTablesLog(sa_family_t) const = 0;
+    virtual std::string getSearchesLog(sa_family_t) const = 0;
+    virtual std::string getSearchLog(const InfoHash&, sa_family_t af = AF_UNSPEC) const = 0;
+
+    virtual void dumpTables() const = 0;
+    virtual std::vector<unsigned> getNodeMessageStats(bool in = false) = 0;
+
+    /**
+     * Set the in-memory storage limit in bytes
+     */
+    virtual void setStorageLimit(size_t limit = DEFAULT_STORAGE_LIMIT) = 0;
+
+    /**
+     * Returns the total memory usage of stored values and the number
+     * of stored values.
+     */
+    virtual std::pair<size_t, size_t> getStoreSize() const = 0;
+
+    virtual std::vector<SockAddr> getPublicAddress(sa_family_t family = 0) = 0;
+
+    /**
+     * Enable or disable logging of DHT internal messages
+     */
+    virtual void setLoggers(LogMethod error = NOLOG, LogMethod warn = NOLOG, LogMethod debug = NOLOG)
+    {
+        DHT_LOG.DBG = debug;
+        DHT_LOG.WARN = warn;
+        DHT_LOG.ERR = error;
+    }
+
+    /**
+     * Only print logs related to the given InfoHash (if given), or disable filter (if zeroes).
+     */
+    virtual void setLogFilter(const InfoHash& f)
+    {
+        DHT_LOG.setFilter(f);
+    }
+
+    // Default no-op: only proxy-based implementations use a push token.
+    // (Stray trailing ';' after the body removed: it was an empty declaration.)
+    virtual void setPushNotificationToken(const std::string&) {}
+
+    /**
+     * Call linked callback with a push notification
+     * @param notification to process
+     */
+    virtual void pushNotificationReceived(const std::map<std::string, std::string>& data) = 0;
+
+protected:
+    // NOTE(review): "logFiler" looks like a misspelling of "logFilter"; the names
+    // are kept as-is because renaming protected members would break subclasses.
+    bool logFilerEnable_ {};
+    InfoHash logFiler_ {};
+    Logger DHT_LOG;
+};
+
+} // namespace dht
--- /dev/null
+/*
+ * Copyright (C) 2016-2018 Savoir-faire Linux Inc.
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ * Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#if OPENDHT_PROXY_CLIENT
+
+#pragma once
+
+#include <functional>
+#include <thread>
+#include <mutex>
+
+#include "callbacks.h"
+#include "def.h"
+#include "dht_interface.h"
+#include "scheduler.h"
+#include "proxy.h"
+
+namespace restbed {
+ class Request;
+}
+
+namespace Json {
+ class Value;
+}
+
+namespace dht {
+
+class SearchCache;
+
+/**
+ * DhtInterface implementation that delegates all DHT operations to a remote
+ * proxy server over HTTP (restbed), with optional push-notification support.
+ */
+class OPENDHT_PUBLIC DhtProxyClient final : public DhtInterface {
+public:
+
+    DhtProxyClient();
+
+    explicit DhtProxyClient(std::function<void()> loopSignal, const std::string& serverHost, const std::string& pushClientId = "");
+
+    virtual void setPushNotificationToken(const std::string& token) {
+#if OPENDHT_PUSH_NOTIFICATIONS
+        deviceKey_ = token;
+#else
+        // Silence the unused-parameter warning when push notifications are disabled.
+        (void) token;
+#endif
+    }
+
+    virtual ~DhtProxyClient();
+
+    /**
+     * Get the ID of the node.
+     */
+    inline const InfoHash& getNodeId() const { return myid; }
+
+    /**
+     * Get the current status of the node for the given family.
+     */
+    NodeStatus getStatus(sa_family_t af) const;
+    NodeStatus getStatus() const {
+        // Overall status is the best of the two families.
+        return std::max(getStatus(AF_INET), getStatus(AF_INET6));
+    }
+
+    /**
+     * Performs final operations before quitting.
+     */
+    void shutdown(ShutdownCallback cb);
+
+    /**
+     * Returns true if the node is running (have access to an open socket).
+     *
+     * af: address family. If non-zero, will return true if the node
+     * is running for the provided family.
+     */
+    bool isRunning(sa_family_t af = 0) const;
+
+    /**
+     * Get a value by asking the proxy and call the provided get callback when
+     * values are found at key.
+     * The operation will start as soon as the node is connected to the network.
+     * @param cb a function called when new values are found on the network.
+     *           It should return false to stop the operation.
+     * @param donecb a function called when the operation is complete.
+     *               cb and donecb won't be called again afterward.
+     * @param f a filter function used to prefilter values.
+     */
+    virtual void get(const InfoHash& key, GetCallback cb, DoneCallback donecb={}, Value::Filter&& f={}, Where&& w = {});
+    virtual void get(const InfoHash& key, GetCallback cb, DoneCallbackSimple donecb={}, Value::Filter&& f={}, Where&& w = {}) {
+        get(key, cb, bindDoneCb(donecb), std::forward<Value::Filter>(f), std::forward<Where>(w));
+    }
+    virtual void get(const InfoHash& key, GetCallbackSimple cb, DoneCallback donecb={}, Value::Filter&& f={}, Where&& w = {}) {
+        get(key, bindGetCb(cb), donecb, std::forward<Value::Filter>(f), std::forward<Where>(w));
+    }
+    virtual void get(const InfoHash& key, GetCallbackSimple cb, DoneCallbackSimple donecb, Value::Filter&& f={}, Where&& w = {}) {
+        get(key, bindGetCb(cb), bindDoneCb(donecb), std::forward<Value::Filter>(f), std::forward<Where>(w));
+    }
+
+    /**
+     * Announce a value on all available protocols (IPv4, IPv6).
+     *
+     * The operation will start as soon as the node is connected to the network.
+     * The done callback will be called once, when the first announce succeeds, or fails.
+     * NOTE: For now, created parameter is ignored.
+     */
+    void put(const InfoHash& key,
+            Sp<Value>,
+            DoneCallback cb=nullptr,
+            time_point created=time_point::max(),
+            bool permanent = false);
+    void put(const InfoHash& key,
+            const Sp<Value>& v,
+            DoneCallbackSimple cb,
+            time_point created=time_point::max(),
+            bool permanent = false)
+    {
+        put(key, v, bindDoneCb(cb), created, permanent);
+    }
+
+    void put(const InfoHash& key,
+            Value&& v,
+            DoneCallback cb=nullptr,
+            time_point created=time_point::max(),
+            bool permanent = false)
+    {
+        put(key, std::make_shared<Value>(std::move(v)), cb, created, permanent);
+    }
+    void put(const InfoHash& key,
+            Value&& v,
+            DoneCallbackSimple cb,
+            time_point created=time_point::max(),
+            bool permanent = false)
+    {
+        put(key, std::forward<Value>(v), bindDoneCb(cb), created, permanent);
+    }
+
+    /**
+     * @param af the socket family
+     * @return node stats from the proxy
+     */
+    NodeStats getNodesStats(sa_family_t af) const;
+
+    /**
+     * @param family the socket family
+     * @return public address
+     */
+    std::vector<SockAddr> getPublicAddress(sa_family_t family = 0);
+
+    /**
+     * Listen on the network for any changes involving a specified hash.
+     * The node will register to receive updates from relevant nodes when
+     * new values are added or removed.
+     *
+     * @return a token to cancel the listener later.
+     */
+    virtual size_t listen(const InfoHash&, ValueCallback, Value::Filter={}, Where={});
+
+    virtual size_t listen(const InfoHash& key, GetCallback cb, Value::Filter f={}, Where w={}) {
+        // Adapt the get-style callback to a ValueCallback; expired values are ignored.
+        return listen(key, [cb](const std::vector<Sp<Value>>& vals, bool expired){
+            if (not expired)
+                return cb(vals);
+            return true;
+        }, std::forward<Value::Filter>(f), std::forward<Where>(w));
+    }
+    virtual size_t listen(const InfoHash& key, GetCallbackSimple cb, Value::Filter f={}, Where w={}) {
+        return listen(key, bindGetCb(cb), std::forward<Value::Filter>(f), std::forward<Where>(w));
+    }
+    virtual bool cancelListen(const InfoHash& key, size_t token);
+
+    /**
+     * Call linked callback with a push notification
+     * @param notification to process
+     */
+    void pushNotificationReceived(const std::map<std::string, std::string>& notification);
+
+    time_point periodic(const uint8_t*, size_t, const SockAddr&);
+    time_point periodic(const uint8_t *buf, size_t buflen, const sockaddr* from, socklen_t fromlen) {
+        return periodic(buf, buflen, SockAddr(from, fromlen));
+    }
+
+    /**
+     * Similar to Dht::get, but sends a Query to filter data remotely.
+     * NOTE: remote queries are currently not supported by the proxy client;
+     * this implementation is a no-op.
+     * @param key the key for which to query data for.
+     * @param cb a function called when new values are found on the network.
+     *           It should return false to stop the operation.
+     * @param done_cb a function called when the operation is complete.
+     *                cb and done_cb won't be called again afterward.
+     * @param q a query used to filter values on the remotes before they send a
+     *          response.
+     */
+    virtual void query(const InfoHash& /*key*/, QueryCallback /*cb*/, DoneCallback /*done_cb*/ = {}, Query&& /*q*/ = {}) { }
+    virtual void query(const InfoHash& key, QueryCallback cb, DoneCallbackSimple done_cb = {}, Query&& q = {}) {
+        query(key, cb, bindDoneCb(done_cb), std::forward<Query>(q));
+    }
+
+    /**
+     * Get data currently being put at the given hash.
+     */
+    std::vector<Sp<Value>> getPut(const InfoHash&);
+
+    /**
+     * Get data currently being put at the given hash with the given id.
+     */
+    Sp<Value> getPut(const InfoHash&, const Value::Id&);
+
+    /**
+     * Stop any put/announce operation at the given location,
+     * for the value with the given id.
+     */
+    bool cancelPut(const InfoHash&, const Value::Id&);
+
+    // No-op: the proxy client has no direct node-to-node connectivity.
+    void pingNode(const sockaddr*, socklen_t, DoneCallbackSimple&& /*cb*/={}) { }
+
+    virtual void registerType(const ValueType& type) {
+        types.registerType(type);
+    }
+    const ValueType& getType(ValueType::Id type_id) const {
+        return types.getType(type_id);
+    }
+
+    std::vector<Sp<Value>> getLocal(const InfoHash& k, Value::Filter filter) const;
+    Sp<Value> getLocalById(const InfoHash& k, Value::Id id) const;
+
+    /**
+     * NOTE: The following methods will not be implemented because the
+     * DhtProxyClient doesn't have any storage nor synchronization process
+     */
+    void insertNode(const InfoHash&, const SockAddr&) { }
+    void insertNode(const InfoHash&, const sockaddr*, socklen_t) { }
+    void insertNode(const NodeExport&) { }
+    std::pair<size_t, size_t> getStoreSize() const { return {}; }
+    std::vector<NodeExport> exportNodes() { return {}; }
+    std::vector<ValuesExport> exportValues() const { return {}; }
+    void importValues(const std::vector<ValuesExport>&) {}
+    std::string getStorageLog() const { return {}; }
+    std::string getStorageLog(const InfoHash&) const { return {}; }
+    std::string getRoutingTablesLog(sa_family_t) const { return {}; }
+    std::string getSearchesLog(sa_family_t) const { return {}; }
+    std::string getSearchLog(const InfoHash&, sa_family_t) const { return {}; }
+    void dumpTables() const {}
+    std::vector<unsigned> getNodeMessageStats(bool) { return {}; }
+    void setStorageLimit(size_t) {}
+    void connectivityChanged(sa_family_t) {
+        restartListeners();
+    }
+    void connectivityChanged() {
+        getProxyInfos();
+        restartListeners();
+        loopSignal_();
+    }
+
+private:
+    /**
+     * Start the connection with a server.
+     */
+    void startProxy();
+
+    /**
+     * Get information from the proxy node
+     * @return the JSON returned by the proxy
+     */
+    struct InfoState;
+    void getProxyInfos();
+    void onProxyInfos(const Json::Value& val, sa_family_t family);
+    SockAddr parsePublicAddress(const Json::Value& val);
+
+    void opFailed();
+
+    size_t doListen(const InfoHash& key, ValueCallback, Value::Filter);
+    bool doCancelListen(const InfoHash& key, size_t token);
+
+    struct ListenState;
+    void sendListen(const std::shared_ptr<restbed::Request>& request, const ValueCallback&, const Value::Filter& filter, const Sp<ListenState>& state);
+    void sendSubscribe(const std::shared_ptr<restbed::Request>& request, const Sp<proxy::ListenToken>&, const Sp<ListenState>& state);
+
+    void doPut(const InfoHash&, Sp<Value>, DoneCallback, time_point created, bool permanent);
+
+    /**
+     * Initialize statusIpvX_
+     */
+    void getConnectivityStatus();
+    /**
+     * cancel all Listeners
+     */
+    void cancelAllListeners();
+    /**
+     * cancel all Operations
+     */
+    void cancelAllOperations();
+
+    std::string serverHost_;
+    std::string pushClientId_;
+
+    mutable std::mutex lockCurrentProxyInfos_;
+    NodeStatus statusIpv4_ {NodeStatus::Disconnected};
+    NodeStatus statusIpv6_ {NodeStatus::Disconnected};
+    NodeStats stats4_ {};
+    NodeStats stats6_ {};
+    SockAddr publicAddressV4_;
+    SockAddr publicAddressV6_;
+
+    InfoHash myid {};
+
+    // registered value types
+    TypeStore types;
+
+    /**
+     * Store listen requests.
+     */
+    struct Listener;
+    struct ProxySearch;
+
+    size_t listenerToken_ {0};
+    std::map<InfoHash, ProxySearch> searches_;
+    mutable std::mutex searchLock_;
+
+    /**
+     * Store current put and get requests.
+     */
+    struct Operation
+    {
+        std::shared_ptr<restbed::Request> req;
+        std::thread thread;
+        std::shared_ptr<std::atomic_bool> finished;
+    };
+    std::vector<Operation> operations_;
+    std::mutex lockOperations_;
+    /**
+     * Callbacks should be executed in the main thread.
+     */
+    std::vector<std::function<void()>> callbacks_;
+    std::mutex lockCallbacks;
+
+    Sp<InfoState> infoState_;
+    std::thread statusThread_;
+    mutable std::mutex statusLock_;
+
+    Scheduler scheduler;
+    /**
+     * Retrieve if we can connect to the proxy (update statusIpvX_)
+     */
+    void confirmProxy();
+    Sp<Scheduler::Job> nextProxyConfirmation {};
+    Sp<Scheduler::Job> listenerRestart {};
+
+    /**
+     * Relaunch LISTEN requests if the client disconnects/reconnects.
+     */
+    void restartListeners();
+
+    /**
+     * Refresh a listen operation.
+     * @param key the hash the listener is registered on
+     * @param listener the listener to refresh
+     */
+    void resubscribe(const InfoHash& key, Listener& listener);
+
+    /**
+     * If we want to use push notifications by default.
+     * NOTE: empty by default to avoid using services like FCM or APN.
+     */
+    std::string deviceKey_ {};
+
+    const std::function<void()> loopSignal_;
+
+#if OPENDHT_PUSH_NOTIFICATIONS
+    void fillBodyToGetToken(std::shared_ptr<restbed::Request> request, unsigned token = 0);
+    void getPushRequest(Json::Value&) const;
+#endif // OPENDHT_PUSH_NOTIFICATIONS
+
+    std::atomic_bool isDestroying_ {false};
+};
+
+}
+
+#endif // OPENDHT_PROXY_CLIENT
--- /dev/null
+/*
+ * Copyright (C) 2017-2018 Savoir-faire Linux Inc.
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ * Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#if OPENDHT_PROXY_SERVER
+
+#pragma once
+
+#include "callbacks.h"
+#include "def.h"
+#include "infohash.h"
+#include "proxy.h"
+#include "scheduler.h"
+#include "sockaddr.h"
+#include "value.h"
+
+#include <thread>
+#include <memory>
+#include <mutex>
+#include <restbed>
+
+#ifdef OPENDHT_JSONCPP
+#include <json/json.h>
+#endif
+
+// Forward declaration of Json::Value so this header does not force
+// <json/json.h> on consumers built without OPENDHT_JSONCPP (the full
+// header is only pulled in above when OPENDHT_JSONCPP is defined).
+namespace Json {
+    class Value;
+}
+
+namespace dht {
+
+class DhtRunner;
+
+/**
+ * Describes the REST API
+ */
+class OPENDHT_PUBLIC DhtProxyServer
+{
+public:
+    /**
+     * Start the Http server for OpenDHT
+     * @param dht the DhtRunner linked to this proxy server
+     * @param port to listen
+     * @param pushServer where to push notifications
+     * @note if the server fails to start (if port is already used or reserved),
+     * it will fail silently
+     */
+    DhtProxyServer(std::shared_ptr<DhtRunner> dht, in_port_t port = 8000, const std::string& pushServer = "");
+    virtual ~DhtProxyServer();
+
+    // Non-copyable, non-movable: the server owns threads and a restbed service.
+    DhtProxyServer(const DhtProxyServer& other) = delete;
+    DhtProxyServer(DhtProxyServer&& other) = delete;
+    DhtProxyServer& operator=(const DhtProxyServer& other) = delete;
+    DhtProxyServer& operator=(DhtProxyServer&& other) = delete;
+
+    struct ServerStats {
+        /** Current number of listen operations */
+        size_t listenCount;
+        /** Current number of permanent put operations */
+        size_t putCount;
+        /** Current number of push tokens with at least one listen operation */
+        size_t pushListenersCount;
+        /** Average requests per second */
+        double requestRate;
+        /** Node Info */
+        NodeInfo nodeInfo;
+
+        /** Human-readable summary of the stats above. */
+        std::string toString() const {
+            std::ostringstream ss;
+            ss << "Listens: " << listenCount << " Puts: " << putCount << " PushListeners: " << pushListenersCount << std::endl;
+            ss << "Requests: " << requestRate << " per second." << std::endl;
+            auto& ni = nodeInfo;
+            auto& ipv4 = ni.ipv4;
+            if (ipv4.table_depth > 1) {
+                ss << "IPv4 Network estimation: " << ipv4.getNetworkSizeEstimation() << std::endl;
+            }
+            auto& ipv6 = ni.ipv6;
+            if (ipv6.table_depth > 1) {
+                ss << "IPv6 Network estimation: " << ipv6.getNetworkSizeEstimation() << std::endl;
+            }
+            return ss.str();
+        }
+
+#ifdef OPENDHT_JSONCPP
+        /**
+         * Build a json object from a NodeStats
+         */
+        Json::Value toJson() const {
+            Json::Value result;
+            result["listenCount"] = static_cast<Json::UInt64>(listenCount);
+            result["putCount"] = static_cast<Json::UInt64>(putCount);
+            result["pushListenersCount"] = static_cast<Json::UInt64>(pushListenersCount);
+            result["requestRate"] = requestRate;
+            result["nodeInfo"] = nodeInfo.toJson();
+            return result;
+        }
+#endif
+    };
+
+    /** Return a snapshot of the current server statistics. */
+    ServerStats stats() const { return stats_; }
+
+    /** Recompute stats_ from the current listeners, puts and node info. */
+    void updateStats() const;
+
+    /** Access the DhtRunner this proxy serves. */
+    std::shared_ptr<DhtRunner> getNode() const { return dht_; }
+
+    /**
+     * Stop the DhtProxyServer
+     */
+    void stop();
+
+private:
+    /**
+     * Return the PublicKey id, the node id and node stats
+     * Method: GET "/"
+     * Result: HTTP 200, body: Node infos in JSON format
+     * On error: HTTP 503, body: {"err":"xxxx"}
+     * @param session
+     */
+    void getNodeInfo(const std::shared_ptr<restbed::Session>& session) const;
+
+    /**
+     * Return ServerStats in JSON format
+     * Method: STATS "/"
+     * Result: HTTP 200, body: Node infos in JSON format
+     * @param session
+     */
+    void getStats(const std::shared_ptr<restbed::Session>& session) const;
+
+    /**
+     * Return Values of an infoHash
+     * Method: GET "/{InfoHash: .*}"
+     * Return: Multiple JSON object in parts. Example:
+     * Value in JSON format\n
+     * Value in JSON format
+     *
+     * On error: HTTP 503, body: {"err":"xxxx"}
+     * @param session
+     */
+    void get(const std::shared_ptr<restbed::Session>& session) const;
+
+    /**
+     * Listen incoming Values of an infoHash.
+     * Method: LISTEN "/{InfoHash: .*}"
+     * Return: Multiple JSON object in parts. Example:
+     * Value in JSON format\n
+     * Value in JSON format
+     *
+     * On error: HTTP 503, body: {"err":"xxxx"}
+     * @param session
+     */
+    void listen(const std::shared_ptr<restbed::Session>& session);
+
+    /**
+     * Put a value on the DHT
+     * Method: POST "/{InfoHash: .*}"
+     * body = Value to put in JSON
+     * Return: HTTP 200 if success and the value put in JSON
+     * On error: HTTP 503, body: {"err":"xxxx"} if no dht
+     * HTTP 400, body: {"err":"xxxx"} if bad json or HTTP 502 if put fails
+     * @param session
+     */
+    void put(const std::shared_ptr<restbed::Session>& session);
+
+    /** Cancel a previously scheduled permanent put for (key, vid). */
+    void cancelPut(const InfoHash& key, Value::Id vid);
+
+#if OPENDHT_PROXY_SERVER_IDENTITY
+    /**
+     * Put a value to sign by the proxy on the DHT
+     * Method: SIGN "/{InfoHash: .*}"
+     * body = Value to put in JSON
+     * Return: HTTP 200 if success and the value put in JSON
+     * On error: HTTP 503, body: {"err":"xxxx"} if no dht
+     * HTTP 400, body: {"err":"xxxx"} if bad json
+     * @param session
+     */
+    void putSigned(const std::shared_ptr<restbed::Session>& session) const;
+
+    /**
+     * Put a value to encrypt by the proxy on the DHT
+     * Method: ENCRYPT "/{hash: .*}"
+     * body = Value to put in JSON + "to":"infoHash"
+     * Return: HTTP 200 if success and the value put in JSON
+     * On error: HTTP 503, body: {"err":"xxxx"} if no dht
+     * HTTP 400, body: {"err":"xxxx"} if bad json
+     * @param session
+     */
+    void putEncrypted(const std::shared_ptr<restbed::Session>& session) const;
+#endif // OPENDHT_PROXY_SERVER_IDENTITY
+
+    /**
+     * Return Values of an infoHash filtered by a value id
+     * Method: GET "/{InfoHash: .*}/{ValueId: .*}"
+     * Return: Multiple JSON object in parts. Example:
+     * Value in JSON format\n
+     * Value in JSON format
+     *
+     * On error: HTTP 503, body: {"err":"xxxx"}
+     * @param session
+     */
+    void getFiltered(const std::shared_ptr<restbed::Session>& session) const;
+
+    /**
+     * Respond allowed Methods
+     * Method: OPTIONS "/{hash: .*}"
+     * Return: HTTP 200 + Allow: allowed methods
+     * See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/OPTIONS
+     * @param session
+     */
+    void handleOptionsMethod(const std::shared_ptr<restbed::Session>& session) const;
+
+    /**
+     * Remove finished listeners
+     * @param testSession if we remove the listener only if the session is closed
+     */
+    void removeClosedListeners(bool testSession = true);
+
+#if OPENDHT_PUSH_NOTIFICATIONS
+    /**
+     * Subscribe to push notifications for an iOS or Android device.
+     * Method: SUBSCRIBE "/{InfoHash: .*}"
+     * Body: {"key": "device_key", (optional)"isAndroid":false (default true)}"
+     * Return: {"token": x}" where x is a token to save
+     * @note: the listen will timeout after six hours (and send a push notification).
+     * so you need to refresh the operation each six hours.
+     * @param session
+     */
+    void subscribe(const std::shared_ptr<restbed::Session>& session);
+    /**
+     * Unsubscribe to push notifications for an iOS or Android device.
+     * Method: UNSUBSCRIBE "/{InfoHash: .*}"
+     * Body: {"key": "device_key", "token": x} where x is the token to cancel
+     * Return: nothing
+     * @param session
+     */
+    void unsubscribe(const std::shared_ptr<restbed::Session>& session);
+    /**
+     * Send a push notification via a gorush push gateway
+     * @param key of the device
+     * @param json, the content to send
+     */
+    void sendPushNotification(const std::string& key, const Json::Value& json, bool isAndroid) const;
+
+    /**
+     * Remove a push listener between a client and a hash
+     * @param pushToken
+     * @param key
+     * @param clientId
+     */
+    void cancelPushListen(const std::string& pushToken, const InfoHash& key, const std::string& clientId);
+
+#endif //OPENDHT_PUSH_NOTIFICATIONS
+
+    using clock = std::chrono::steady_clock;
+    using time_point = clock::time_point;
+
+    /** Thread running the restbed service. */
+    std::thread server_thread {};
+    std::unique_ptr<restbed::Service> service_;
+    std::shared_ptr<DhtRunner> dht_;
+
+    /** Protects the scheduler; schedulerCv_ wakes the scheduler thread. */
+    std::mutex schedulerLock_;
+    std::condition_variable schedulerCv_;
+    Scheduler scheduler_;
+    std::thread schedulerThread_;
+
+    /** Periodic job updating/printing server statistics. */
+    Sp<Scheduler::Job> printStatsJob_;
+    mutable std::mutex statsMutex_;
+    mutable NodeInfo nodeInfo_ {};
+
+    // Handle client quit for listen.
+    // NOTE: can be simplified when we support restbed 5.0
+    std::thread listenThread_;
+    struct SessionToHashToken {
+        std::shared_ptr<restbed::Session> session;
+        InfoHash hash;
+        std::future<size_t> token;
+    };
+    std::vector<SessionToHashToken> currentListeners_;
+    std::mutex lockListener_;
+    std::atomic_bool stopListeners {false};
+
+    struct PermanentPut;
+    struct SearchPuts;
+    std::map<InfoHash, SearchPuts> puts_;
+
+    /** Request counter and window start, used to compute requestRate. */
+    mutable std::atomic<size_t> requestNum_ {0};
+    mutable std::atomic<time_point> lastStatsReset_ {time_point::min()};
+
+    const std::string pushServer_;
+
+    mutable ServerStats stats_;
+
+#if OPENDHT_PUSH_NOTIFICATIONS
+    struct Listener;
+    struct PushListener;
+    std::mutex lockPushListeners_;
+    std::map<std::string, PushListener> pushListeners_;
+    proxy::ListenToken tokenPushNotif_ {0};
+#endif //OPENDHT_PUSH_NOTIFICATIONS
+};
+
+}
+
+#endif //OPENDHT_PROXY_SERVER
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Authors: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h"
+#include "value.h"
+#include "callbacks.h"
+#include "sockaddr.h"
+#include "log_enable.h"
+#include "def.h"
+
+#include <thread>
+#include <mutex>
+#include <atomic>
+#include <condition_variable>
+#include <future>
+#include <exception>
+#include <queue>
+#include <chrono>
+
+namespace dht {
+
+struct Node;
+class SecureDht;
+struct SecureDhtConfig;
+
+/**
+ * Provides a thread-safe interface to run the (secure) DHT.
+ * The class will open sockets on the provided port and will
+ * either wait for (expectedly frequent) calls to ::loop() or start an internal
+ * thread that will update the DHT when appropriate.
+ */
+class OPENDHT_PUBLIC DhtRunner {
+
+public:
+    /** Called with (old status, new status) on connectivity state changes. */
+    typedef std::function<void(NodeStatus, NodeStatus)> StatusCallback;
+
+    struct Config {
+        SecureDhtConfig dht_config;
+        bool threaded;
+        std::string proxy_server;
+        std::string push_node_id;
+    };
+
+    DhtRunner();
+    virtual ~DhtRunner();
+
+    void get(InfoHash id, GetCallbackSimple cb, DoneCallback donecb={}, Value::Filter f = Value::AllFilter(), Where w = {}) {
+        get(id, bindGetCb(cb), donecb, f, w);
+    }
+
+    void get(InfoHash id, GetCallbackSimple cb, DoneCallbackSimple donecb={}, Value::Filter f = Value::AllFilter(), Where w = {}) {
+        get(id, bindGetCb(cb), donecb, f, w);
+    }
+
+    void get(InfoHash hash, GetCallback vcb, DoneCallback dcb, Value::Filter f={}, Where w = {});
+
+    void get(InfoHash id, GetCallback cb, DoneCallbackSimple donecb={}, Value::Filter f = Value::AllFilter(), Where w = {}) {
+        get(id, cb, bindDoneCb(donecb), f, w);
+    }
+    void get(const std::string& key, GetCallback vcb, DoneCallbackSimple dcb={}, Value::Filter f = Value::AllFilter(), Where w = {});
+
+    /** Typed get: unpacks each batch of values as a vector of T. */
+    template <class T>
+    void get(InfoHash hash, std::function<bool(std::vector<T>&&)> cb, DoneCallbackSimple dcb={})
+    {
+        get(hash, [=](const std::vector<std::shared_ptr<Value>>& vals) {
+            return cb(unpackVector<T>(vals));
+        },
+        dcb,
+        getFilterSet<T>());
+    }
+    /** Typed get: calls cb once per value; values failing to unpack are skipped. */
+    template <class T>
+    void get(InfoHash hash, std::function<bool(T&&)> cb, DoneCallbackSimple dcb={})
+    {
+        get(hash, [=](const std::vector<std::shared_ptr<Value>>& vals) {
+            for (const auto& v : vals) {
+                try {
+                    if (not cb(Value::unpack<T>(*v)))
+                        return false;
+                } catch (const std::exception&) {
+                    continue;
+                }
+            }
+            return true;
+        },
+        dcb,
+        getFilterSet<T>());
+    }
+
+    /** Future-based get: resolves with all collected values once the operation completes. */
+    std::future<std::vector<std::shared_ptr<dht::Value>>> get(InfoHash key, Value::Filter f = Value::AllFilter(), Where w = {}) {
+        auto p = std::make_shared<std::promise<std::vector<std::shared_ptr< dht::Value >>>>();
+        auto values = std::make_shared<std::vector<std::shared_ptr< dht::Value >>>();
+        get(key, [=](const std::vector<std::shared_ptr<dht::Value>>& vlist) {
+            values->insert(values->end(), vlist.begin(), vlist.end());
+            return true;
+        }, [=](bool) {
+            p->set_value(std::move(*values));
+        },
+        f, w);
+        return p->get_future();
+    }
+
+    /** Future-based typed get. */
+    template <class T>
+    std::future<std::vector<T>> get(InfoHash key) {
+        auto p = std::make_shared<std::promise<std::vector<T>>>();
+        auto values = std::make_shared<std::vector<T>>();
+        get<T>(key, [=](T&& v) {
+            values->emplace_back(std::move(v));
+            return true;
+        }, [=](bool) {
+            p->set_value(std::move(*values));
+        });
+        return p->get_future();
+    }
+
+    void query(const InfoHash& hash, QueryCallback cb, DoneCallback done_cb = {}, Query q = {});
+    void query(const InfoHash& hash, QueryCallback cb, DoneCallbackSimple done_cb = {}, Query q = {}) {
+        query(hash, cb, bindDoneCb(done_cb), q);
+    }
+
+    std::future<size_t> listen(InfoHash key, ValueCallback vcb, Value::Filter f = Value::AllFilter(), Where w = {});
+
+    std::future<size_t> listen(InfoHash key, GetCallback cb, Value::Filter f={}, Where w={}) {
+        return listen(key, [cb](const std::vector<Sp<Value>>& vals, bool expired){
+            if (not expired)
+                return cb(vals);
+            return true;
+        // f and w are by-value parameters (lvalues), so std::move is the
+        // correct way to hand them off; std::forward was misleading here.
+        }, std::move(f), std::move(w));
+    }
+    std::future<size_t> listen(const std::string& key, GetCallback vcb, Value::Filter f = Value::AllFilter(), Where w = {});
+    std::future<size_t> listen(InfoHash key, GetCallbackSimple cb, Value::Filter f = Value::AllFilter(), Where w = {}) {
+        return listen(key, bindGetCb(cb), f, w);
+    }
+
+    /** Typed listen: unpacks each batch of values as a vector of T. */
+    template <class T>
+    std::future<size_t> listen(InfoHash hash, std::function<bool(std::vector<T>&&)> cb)
+    {
+        return listen(hash, [=](const std::vector<std::shared_ptr<Value>>& vals) {
+            return cb(unpackVector<T>(vals));
+        },
+        getFilterSet<T>());
+    }
+    /** Typed listen with expiration flag. */
+    template <class T>
+    std::future<size_t> listen(InfoHash hash, std::function<bool(std::vector<T>&&, bool)> cb)
+    {
+        return listen(hash, [=](const std::vector<std::shared_ptr<Value>>& vals, bool expired) {
+            return cb(unpackVector<T>(vals), expired);
+        },
+        getFilterSet<T>());
+    }
+
+    /** Typed listen: calls cb once per value; values failing to unpack are skipped. */
+    template <typename T>
+    std::future<size_t> listen(InfoHash hash, std::function<bool(T&&)> cb, Value::Filter f = Value::AllFilter(), Where w = {})
+    {
+        return listen(hash, [=](const std::vector<std::shared_ptr<Value>>& vals) {
+            for (const auto& v : vals) {
+                try {
+                    if (not cb(Value::unpack<T>(*v)))
+                        return false;
+                } catch (const std::exception&) {
+                    continue;
+                }
+            }
+            return true;
+        },
+        getFilterSet<T>(f), w);
+    }
+    /** Typed listen with expiration flag, one call per value. */
+    template <typename T>
+    std::future<size_t> listen(InfoHash hash, std::function<bool(T&&, bool)> cb, Value::Filter f = Value::AllFilter(), Where w = {})
+    {
+        return listen(hash, [=](const std::vector<std::shared_ptr<Value>>& vals, bool expired) {
+            for (const auto& v : vals) {
+                try {
+                    if (not cb(Value::unpack<T>(*v), expired))
+                        return false;
+                } catch (const std::exception&) {
+                    continue;
+                }
+            }
+            return true;
+        },
+        getFilterSet<T>(f), w);
+    }
+
+    void cancelListen(InfoHash h, size_t token);
+    void cancelListen(InfoHash h, std::shared_future<size_t> token);
+
+    void put(InfoHash hash, std::shared_ptr<Value> value, DoneCallback cb={}, time_point created=time_point::max(), bool permanent = false);
+    void put(InfoHash hash, std::shared_ptr<Value> value, DoneCallbackSimple cb, time_point created=time_point::max(), bool permanent = false) {
+        put(hash, value, bindDoneCb(cb), created, permanent);
+    }
+
+    void put(InfoHash hash, Value&& value, DoneCallback cb={}, time_point created=time_point::max(), bool permanent = false);
+    void put(InfoHash hash, Value&& value, DoneCallbackSimple cb, time_point created=time_point::max(), bool permanent = false) {
+        put(hash, std::forward<Value>(value), bindDoneCb(cb), created, permanent);
+    }
+    void put(const std::string& key, Value&& value, DoneCallbackSimple cb={}, time_point created=time_point::max(), bool permanent = false);
+
+    /** Cancel a permanent put for (h, id). */
+    void cancelPut(const InfoHash& h, const Value::Id& id);
+
+    void putSigned(InfoHash hash, std::shared_ptr<Value> value, DoneCallback cb={});
+    void putSigned(InfoHash hash, std::shared_ptr<Value> value, DoneCallbackSimple cb) {
+        putSigned(hash, value, bindDoneCb(cb));
+    }
+
+    void putSigned(InfoHash hash, Value&& value, DoneCallback cb={});
+    void putSigned(InfoHash hash, Value&& value, DoneCallbackSimple cb) {
+        putSigned(hash, std::forward<Value>(value), bindDoneCb(cb));
+    }
+    void putSigned(const std::string& key, Value&& value, DoneCallbackSimple cb={});
+
+    void putEncrypted(InfoHash hash, InfoHash to, std::shared_ptr<Value> value, DoneCallback cb={});
+    void putEncrypted(InfoHash hash, InfoHash to, std::shared_ptr<Value> value, DoneCallbackSimple cb) {
+        putEncrypted(hash, to, value, bindDoneCb(cb));
+    }
+
+    void putEncrypted(InfoHash hash, InfoHash to, Value&& value, DoneCallback cb={});
+    void putEncrypted(InfoHash hash, InfoHash to, Value&& value, DoneCallbackSimple cb) {
+        putEncrypted(hash, to, std::forward<Value>(value), bindDoneCb(cb));
+    }
+    void putEncrypted(const std::string& key, InfoHash to, Value&& value, DoneCallback cb={});
+
+    /**
+     * Insert known nodes to the routing table, without necessarily pinging them.
+     * Useful to restart a node and get things running fast without putting load on the network.
+     */
+    void bootstrap(const std::vector<SockAddr>& nodes, DoneCallbackSimple&& cb={});
+    void bootstrap(const SockAddr& addr, DoneCallbackSimple&& cb={});
+
+    /**
+     * Insert known nodes to the routing table, without necessarily pinging them.
+     * Useful to restart a node and get things running fast without putting load on the network.
+     */
+    void bootstrap(const std::vector<NodeExport>& nodes);
+
+    /**
+     * Add host:service to bootstrap nodes, and ping this node.
+     * DNS resolution is performed asynchronously.
+     * When disconnected, all bootstrap nodes added with this method will be tried regularly until connection
+     * to the DHT network is established.
+     */
+    void bootstrap(const std::string& host, const std::string& service);
+
+    /**
+     * Clear the list of bootstrap added using bootstrap(const std::string&, const std::string&).
+     */
+    void clearBootstrap();
+
+    /**
+     * Inform the DHT of lower-layer connectivity changes.
+     * This will cause the DHT to assume an IP address change.
+     * The DHT will recontact neighbor nodes, re-register for listen ops etc.
+     */
+    void connectivityChanged();
+
+    void dumpTables() const;
+
+    /**
+     * Get the public key fingerprint if an identity is used with this node, 0 otherwise.
+     */
+    InfoHash getId() const;
+
+    /**
+     * Get the ID of the DHT node.
+     */
+    InfoHash getNodeId() const;
+
+    /**
+     * Returns the currently bound address.
+     * @param f: address family of the bound address to retrieve.
+     */
+    const SockAddr& getBound(sa_family_t f = AF_INET) const {
+        return (f == AF_INET) ? bound4 : bound6;
+    }
+
+    /**
+     * Returns the currently bound port, in host byte order.
+     * @param f: address family of the bound port to retrieve.
+     */
+    in_port_t getBoundPort(sa_family_t f = AF_INET) const {
+        return getBound(f).getPort();
+    }
+
+    std::pair<size_t, size_t> getStoreSize() const;
+
+    void setStorageLimit(size_t limit = DEFAULT_STORAGE_LIMIT);
+
+    std::vector<NodeExport> exportNodes() const;
+
+    std::vector<ValuesExport> exportValues() const;
+
+    void setLoggers(LogMethod err = NOLOG, LogMethod warn = NOLOG, LogMethod debug = NOLOG);
+
+    /**
+     * Only print logs related to the given InfoHash (if given), or disable filter (if zeroes).
+     */
+    void setLogFilter(const InfoHash& f = {});
+
+    void registerType(const ValueType& type);
+
+    void importValues(const std::vector<ValuesExport>& values);
+
+    bool isRunning() const {
+        return running;
+    }
+
+    NodeStats getNodesStats(sa_family_t af) const;
+    unsigned getNodesStats(sa_family_t af, unsigned *good_return, unsigned *dubious_return, unsigned *cached_return, unsigned *incoming_return) const;
+    NodeInfo getNodeInfo() const;
+
+    std::vector<unsigned> getNodeMessageStats(bool in = false) const;
+    std::string getStorageLog() const;
+    std::string getStorageLog(const InfoHash&) const;
+    std::string getRoutingTablesLog(sa_family_t af) const;
+    std::string getSearchesLog(sa_family_t af = AF_UNSPEC) const;
+    std::string getSearchLog(const InfoHash&, sa_family_t af = AF_UNSPEC) const;
+    std::vector<SockAddr> getPublicAddress(sa_family_t af = AF_UNSPEC);
+    std::vector<std::string> getPublicAddressStr(sa_family_t af = AF_UNSPEC);
+
+    // securedht methods
+
+    void findCertificate(InfoHash hash, std::function<void(const std::shared_ptr<crypto::Certificate>)>);
+    void registerCertificate(std::shared_ptr<crypto::Certificate> cert);
+    void setLocalCertificateStore(CertificateStoreQuery&& query_method);
+
+    /**
+     * @param port: Local port to bind. Both IPv4 and IPv6 will be tried (ANY).
+     * @param identity: RSA key pair to use for cryptographic operations.
+     * @param threaded: If false, ::loop() must be called periodically. Otherwise a thread is launched.
+     * @param network: Network id restricting communication to nodes using the same id.
+     */
+    void run(in_port_t port = 4222, const crypto::Identity identity = {}, bool threaded = false, NetId network = 0) {
+        run(port, {
+            /*.dht_config = */{
+                /*.node_config = */{
+                    /*.node_id = */{},
+                    /*.network = */network,
+                    /*.is_bootstrap = */false,
+                    /*.maintain_storage*/false
+                },
+                /*.id = */identity
+            },
+            /*.threaded = */threaded,
+            /*.proxy_server = */"",
+            /*.push_node_id = */""
+        });
+    }
+    void run(in_port_t port, Config config);
+
+    /**
+     * @param local4: Local IPv4 address and port to bind. Can be null.
+     * @param local6: Local IPv6 address and port to bind. Can be null.
+     *        You should always bind to a global IPv6 address.
+     * @param config: DHT and runner configuration (identity, threading, proxy).
+     */
+    void run(const SockAddr& local4, const SockAddr& local6, Config config);
+
+    /**
+     * Same as @run(const SockAddr&, const SockAddr&, Config), but with string IP addresses and service (port).
+     */
+    void run(const char* ip4, const char* ip6, const char* service, Config config);
+
+    void setOnStatusChanged(StatusCallback&& cb) {
+        statusCb = std::move(cb);
+    }
+
+    /**
+     * In non-threaded mode, the user should call this method
+     * regularly and every time a new packet is received.
+     * @return the next op
+     */
+    time_point loop() {
+        std::lock_guard<std::mutex> lck(dht_mtx);
+        time_point wakeup = time_point::min();
+        try {
+            wakeup = loop_();
+        } catch (const dht::SocketException& e) {
+            // Socket failure: re-bind to the previously bound addresses.
+            startNetwork(bound4, bound6);
+        }
+        return wakeup;
+    }
+
+    /**
+     * Gracefully disconnect from network.
+     */
+    void shutdown(ShutdownCallback cb);
+
+    /**
+     * Quit and wait for all threads to terminate.
+     * No callbacks will be called after this method returns.
+     * All internal state will be lost. The DHT can then be run again with @run().
+     */
+    void join();
+
+    void setProxyServer(const std::string& proxy, const std::string& pushNodeId = "");
+
+    /**
+     * Start or stop the proxy
+     * @param proxify if we want to use the proxy
+     */
+    void enableProxy(bool proxify);
+
+    /* Push notification methods */
+
+    /**
+     * Updates the push notification device token
+     */
+    void setPushNotificationToken(const std::string& token);
+
+    /**
+     * Insert a push notification to process for OpenDHT
+     */
+    void pushNotificationReceived(const std::map<std::string, std::string>& data);
+
+    /* Proxy server methods */
+    void forwardAllMessages(bool forward);
+
+private:
+    static constexpr std::chrono::seconds BOOTSTRAP_PERIOD {10};
+
+    /**
+     * Will try to resolve the list of hostnames `bootstrap_nodes` on separate
+     * thread and then queue ping requests. This list should contain reliable
+     * nodes so that the DHT node can recover quickly from losing connection
+     * with the network.
+     */
+    void tryBootstrapContinuously();
+
+    void stopNetwork();
+    void startNetwork(const SockAddr sin4, const SockAddr sin6);
+    time_point loop_();
+
+    NodeStatus getStatus() const {
+        return std::max(status4, status6);
+    }
+
+    /** Local DHT instance */
+    std::unique_ptr<SecureDht> dht_;
+
+    /** Proxy client instance */
+    std::unique_ptr<SecureDht> dht_via_proxy_;
+
+    /** true if we are currently using a proxy */
+    std::atomic_bool use_proxy {false};
+
+    /** Current configuration */
+    Config config_;
+
+    /**
+     * reset dht clients
+     */
+    void resetDht();
+    /**
+     * @return the current active DHT
+     */
+    SecureDht* activeDht() const;
+
+    /**
+     * Store current listeners and translates global tokens for each client.
+     */
+    struct Listener;
+    std::map<size_t, Listener> listeners_;
+    size_t listener_token_ {1};
+
+    mutable std::mutex dht_mtx {};
+    std::thread dht_thread {};
+    std::condition_variable cv {};
+
+    std::thread rcv_thread {};
+    std::mutex sock_mtx {};
+
+    struct ReceivedPacket {
+        Blob data;
+        SockAddr from;
+        time_point received;
+    };
+    std::queue<ReceivedPacket> rcv {};
+
+    /** true if currently actively bootstrapping */
+    std::atomic_bool bootstraping {false};
+    /* bootstrap nodes given as (host, service) pairs */
+    std::vector<std::pair<std::string,std::string>> bootstrap_nodes_all {};
+    std::vector<std::pair<std::string,std::string>> bootstrap_nodes {};
+    std::thread bootstrap_thread {};
+    /** protects bootstrap_nodes, bootstrap_thread */
+    std::mutex bootstrap_mtx {};
+    std::condition_variable bootstrap_cv {};
+
+    /** Pending operations queued until the DHT is ready (prio runs first). */
+    std::queue<std::function<void(SecureDht&)>> pending_ops_prio {};
+    std::queue<std::function<void(SecureDht&)>> pending_ops {};
+    std::mutex storage_mtx {};
+
+    std::atomic_bool running {false};
+    std::atomic_bool running_network {false};
+
+    NodeStatus status4 {NodeStatus::Disconnected},
+               status6 {NodeStatus::Disconnected};
+    StatusCallback statusCb {nullptr};
+
+    int stop_writefd {-1};
+    int s4 {-1}, s6 {-1};
+    SockAddr bound4 {};
+    SockAddr bound6 {};
+
+    /** Push notification token */
+    std::string pushToken_;
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Nicolas Reynaud <nicolas.reynaud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include <memory>
+#include <map>
+#include <functional>
+#include <stdexcept>
+#include <bitset>
+#include <iostream>
+#include <sstream>
+
+#include "../value.h"
+#include "../dhtrunner.h"
+#include "../infohash.h"
+
+namespace dht {
+namespace indexation {
+
+/*!
+ * @class Prefix
+ * @brief A blob structure which prefixes a Key in the PHT.
+ * @details
+ * Since the PHT structure is a "trie", every node in this structure have a
+ * label which is defined by the path from the root of the trie to the node. If
+ * the node in question is a leaf, *the label is a prefix of all the keys
+ * contained in the leaf*.
+ */
+struct OPENDHT_PUBLIC Prefix {
+ Prefix() {}
+ Prefix(InfoHash h) : size_(h.size() * 8), content_(h.begin(), h.end()) { }
+ Prefix(const Blob& d, const Blob& f={}) : size_(d.size()*8), flags_(f), content_(d) { }
+
+ Prefix(const Prefix& p, size_t first) :
+ size_(std::min(first, p.content_.size()*8)),
+ content_(Blob(p.content_.begin(), p.content_.begin()+size_/8))
+ {
+
+ auto rem = size_ % 8;
+ if ( not p.flags_.empty() ) {
+ flags_ = Blob(p.flags_.begin(), p.flags_.begin()+size_/8);
+ if (rem)
+ flags_.push_back(p.flags_[size_/8] & (0xFF << (8 - rem)));
+ }
+
+ if (rem)
+ content_.push_back(p.content_[size_/8] & (0xFF << (8 - rem)));
+ }
+
+ /**
+ * Get a sub prefix of the Prefix
+ *
+ * @param len lenght of the prefix to get, could be negative
+ * if len is negativ then you will get the prefix
+ * of size of the previous prefix minus len
+ *
+ * @return Sub-prefix of size len or if len is negative sub-prefix of size
+ * of prefix minus len
+ *
+ * @throw out_of_range if len is larger than size of the content
+ */
+ Prefix getPrefix(ssize_t len) const {
+ if ((size_t)std::abs(len) >= content_.size() * 8)
+ throw std::out_of_range("len larger than prefix size.");
+ if (len < 0)
+ len += size_;
+
+ return Prefix(*this, len);
+ }
+
+ /**
+ * Flags are considered as active if flag is empty or if the flag
+ * at pos 'pos' is active
+ee *
+ * @see isActiveBit in private function
+ */
+ bool isFlagActive(size_t pos) const {
+ return flags_.empty() or isActiveBit(flags_, pos);
+ }
+
+ /**
+ * @see isActiveBit in private function
+ */
+ bool isContentBitActive(size_t pos) const {
+ return isActiveBit(content_, pos);
+ }
+
+ Prefix getFullSize() { return Prefix(*this, content_.size()*8); }
+
+ /**
+ * This methods gets the prefix of its sibling in the PHT structure.
+ *
+ * @return The prefix of this sibling.
+ */
+ Prefix getSibling() const {
+ Prefix copy = *this;
+ if ( size_ )
+ copy.swapContentBit(size_ - 1);
+
+ return copy;
+ }
+
+ InfoHash hash() const {
+ Blob copy(content_);
+ copy.push_back(size_);
+ return InfoHash::get(copy);
+ }
+
+ /**
+ * This method count total of bit in common between 2 prefix
+ *
+ * @param p1 first prefix to compared
+ * @param p2 second prefix to compared
+ * @return Lenght of the larger common prefix between both
+ */
+ static inline unsigned commonBits(const Prefix& p1, const Prefix& p2) {
+ unsigned i, j;
+ uint8_t x;
+ auto longest_prefix_size = std::min(p1.size_, p2.size_);
+
+ for (i = 0; i < longest_prefix_size; i++) {
+ if (p1.content_.data()[i] != p2.content_.data()[i]
+ or not p1.isFlagActive(i)
+ or not p2.isFlagActive(i) ) {
+
+ break;
+ }
+ }
+
+ if (i == longest_prefix_size)
+ return 8*longest_prefix_size;
+
+ x = p1.content_.data()[i] ^ p2.content_.data()[i];
+
+ j = 0;
+ while ((x & 0x80) == 0) {
+ x <<= 1;
+ j++;
+ }
+
+ return 8 * i + j;
+ }
+
+ /**
+ * @see doc of swap private function
+ */
+ void swapContentBit(size_t bit) {
+ swapBit(content_, bit);
+ }
+
+ /**
+ * @see doc of swap private function
+ */
+ void swapFlagBit(size_t bit) {
+ swapBit(flags_, bit);
+ }
+
+ /**
+ * @see doc of addPadding private function
+ */
+ void addPaddingContent(size_t size) {
+ content_ = addPadding(content_, size);
+ }
+
+ void updateFlags() {
+ /* Fill first known bit */
+ auto csize = size_ - flags_.size() * 8;
+ while(csize >= 8) {
+ flags_.push_back(0xFF);
+ csize -= 8;
+ }
+
+ /* if needed fill remaining bit */
+ if ( csize )
+ flags_.push_back(0xFF << (8 - csize));
+
+ /* Complet vector space missing */
+ for ( auto i = flags_.size(); i < content_.size(); i++ )
+ flags_.push_back(0xFF);
+ }
+
+ std::string toString() const;
+
+ size_t size_ {0};
+
+ /* Will contain flags according to content_.
+ If flags_[i] == 0, then content_[i] is unknown
+ else if flags_[i] == 1, then content_[i] is known */
+ Blob flags_ {};
+ Blob content_ {};
+
+private:
+
+ /**
+ * Add a padding to the input blob
+ *
+ * @param toP : Prefix where to add a padding
+ * @param size : Final size of the prefix with padding
+ *
+ * @return Copy of the input Blob but with a padding
+ */
+ Blob addPadding(Blob toP, size_t size) {
+ Blob copy = toP;
+ for ( auto i = copy.size(); i < size; i++ )
+ copy.push_back(0);
+
+ swapBit(copy, size_ + 1);
+ return copy;
+ }
+
+ /**
+ * Check if the bit at pos 'pos' is active, i.e. equal to 1
+ *
+ * @param b : Blob to check (content_ or flags_)
+ * @param pos : Position to check; MSB of b[0] is position 0
+ *
+ * @return true if the bit is equal to 1, false otherwise
+ *
+ * @throw out_of_range if pos is superior or equal to blob size * 8
+ */
+ bool isActiveBit(const Blob &b, size_t pos) const {
+ /* Bound the check to the blob actually indexed: checking against
+ content_.size() (as before) allowed an out-of-range read of b
+ whenever b (e.g. flags_) is shorter than content_. */
+ if ( pos >= b.size() * 8 )
+ throw std::out_of_range("Can't detect active bit at pos, pos larger than prefix size or empty prefix");
+
+ return ((b[pos / 8] >> (7 - (pos % 8)) ) & 1) == 1;
+ }
+
+ /**
+ * Swap bit at position bit [from 0 to 1 and vice-versa], in place
+ *
+ * @param b : Blob to modify
+ * @param bit : Bit to swap on b
+ *
+ * @throw out_of_range if bit is superior or equal to blob size * 8
+ */
+ void swapBit(Blob &b, size_t bit) {
+ if ( bit >= b.size() * 8 )
+ throw std::out_of_range("bit larger than prefix size.");
+
+ /* NOTE(review): (8 - bit) % 8 maps bit 0 to the LSB and bit k (1..7)
+ to offset 8-k, which does NOT match isActiveBit's MSB-first
+ numbering (7 - pos % 8). Callers such as addPadding pass
+ 'size_ + 1' apparently to compensate — confirm before changing. */
+ size_t offset_bit = (8 - bit) % 8;
+ b[bit / 8] ^= (1 << offset_bit);
+ }
+};
+
+/* An indexed value: where to find it (InfoHash) and its id on that node. */
+using Value = std::pair<InfoHash, dht::Value::Id>;
+
+/**
+ * Serializable entry stored on the DHT by the index: the linearized key
+ * (prefix) and the value location. The index name is transported through
+ * the dht::Value user_type, not through msgpack (see MSGPACK_DEFINE_MAP).
+ */
+struct OPENDHT_PUBLIC IndexEntry : public dht::Value::Serializable<IndexEntry> {
+ static const ValueType TYPE;
+
+ /* Unpack from a dht::Value; the index name travels in v.user_type. */
+ virtual void unpackValue(const dht::Value& v) {
+ Serializable<IndexEntry>::unpackValue(v);
+ name = v.user_type;
+ }
+
+ /* Pack to a dht::Value, storing the index name in user_type. */
+ virtual dht::Value packValue() const {
+ auto pack = Serializable<IndexEntry>::packValue();
+ pack.user_type = name;
+ return pack;
+ }
+
+ Blob prefix; /* linearized key this entry is stored under */
+ Value value; /* location (hash, value id) of the indexed value */
+ std::string name; /* index name; serialized via user_type only */
+ MSGPACK_DEFINE_MAP(prefix, value)
+};
+
+/**
+ * Prefix Hash Tree: a distributed index over the DHT allowing prefix and
+ * range queries on multi-field keys.
+ */
+class OPENDHT_PUBLIC Pht {
+ static constexpr const char* INVALID_KEY = "Key does not match the PHT key spec.";
+
+ /* Prefixes the user_type for all dht values put on the DHT */
+ static constexpr const char* INDEX_PREFIX = "index.pht.";
+
+public:
+
+ /* This is the maximum number of entries per node. This parameter is
+ * critical and influences the traffic a lot during a lookup operation.
+ */
+ static constexpr const size_t MAX_NODE_ENTRY_COUNT {16};
+
+ /* A key for an index entry */
+ using Key = std::map<std::string, Blob>;
+
+ /* Specifications of the keys. It defines the number, the length and the
+ * serialization order of fields. */
+ using KeySpec = std::map<std::string, size_t>;
+ using LookupCallback = std::function<void(std::vector<std::shared_ptr<Value>>& values, const Prefix& p)>;
+
+ /* C-style lookup callback and binder, used by language bindings. */
+ typedef void (*LookupCallbackRaw)(std::vector<std::shared_ptr<Value>>* values, Prefix* p, void *user_data);
+ static LookupCallback
+ bindLookupCb(LookupCallbackRaw raw_cb, void* user_data) {
+ if (not raw_cb) return {};
+ return [=](std::vector<std::shared_ptr<Value>>& values, const Prefix& p) {
+ raw_cb((std::vector<std::shared_ptr<Value>>*) &values, (Prefix*) &p, user_data);
+ };
+ }
+ /* Simplified lookup callback (values only, no prefix) and its C binder. */
+ using LookupCallbackSimple = std::function<void(std::vector<std::shared_ptr<Value>>& values)>;
+ typedef void (*LookupCallbackSimpleRaw)(std::vector<std::shared_ptr<Value>>* values, void *user_data);
+ static LookupCallbackSimple
+ bindLookupCbSimple(LookupCallbackSimpleRaw raw_cb, void* user_data) {
+ if (not raw_cb) return {};
+ return [=](std::vector<std::shared_ptr<Value>>& values) {
+ raw_cb((std::vector<std::shared_ptr<Value>>*) &values, user_data);
+ };
+ }
+
+ /* Note: canary_ is initialized from name_, which works because name_ is
+ declared before canary_ below. */
+ Pht(std::string name, KeySpec k_spec, std::shared_ptr<DhtRunner> dht)
+ : name_(INDEX_PREFIX + name), canary_(name_ + ".canary"), keySpec_(k_spec), dht_(dht) {}
+
+ virtual ~Pht () { }
+
+ /**
+ * Lookup a key for a value.
+ */
+ void lookup(Key k, LookupCallback cb = {}, DoneCallbackSimple done_cb = {}, bool exact_match = true);
+ void lookup(Key k, LookupCallbackSimple cb = {}, DoneCallbackSimple done_cb = {}, bool exact_match = true)
+ {
+ lookup(k, [=](std::vector<std::shared_ptr<Value>>& values, Prefix) { cb(values); }, done_cb, exact_match);
+ }
+
+ /**
+ * Wrapper function which calls the private insert.
+ *
+ * @param k : Key to insert [i.e map of string, blob]
+ * @param v : Value to insert
+ * @param done_cb : Callback called when the whole insert is done
+ */
+ void insert(Key k, Value v, DoneCallbackSimple done_cb = {}) {
+ Prefix p = linearize(k);
+
+ auto lo = std::make_shared<int>(0);
+ auto hi = std::make_shared<int>(p.size_);
+
+ IndexEntry entry;
+ entry.value = v;
+ entry.prefix = p.content_;
+ entry.name = name_;
+
+ Pht::insert(p, entry, lo, hi, clock::now(), true, done_cb);
+ }
+
+private:
+
+ /**
+ * Insert function which really inserts onto the pht
+ *
+ * @param kp : Prefix to insert (the linearized key)
+ * @param entry : Entry created from the value
+ * @param lo : Lowest point to start in the prefix
+ * @param hi : Highest point to end in the prefix
+ * @param time_p : Timepoint to use for the insertion into the dht (must be < now)
+ * @param check_split : If this flag is true then the algorithm will not use the merge algorithm
+ * @param done_cb : Callback to call when the insert is done
+ */
+
+ void insert(const Prefix& kp, IndexEntry entry, std::shared_ptr<int> lo, std::shared_ptr<int> hi, time_point time_p,
+ bool check_split, DoneCallbackSimple done_cb = {});
+
+ class Cache {
+ public:
+ /**
+ * Insert all needed nodes into the tree according to a prefix
+ * @param p : Prefix that we need to insert
+ */
+ void insert(const Prefix& p);
+
+ /**
+ * Lookup into the tree to return the maximum prefix length in the cache tree
+ *
+ * @param p : Prefix that we are looking for
+ *
+ * @return : The size of the longest prefix known in the cache between 0 and p.size_
+ */
+
+ int lookup(const Prefix& p);
+
+ private:
+ static constexpr const size_t MAX_ELEMENT {1024};
+ static constexpr const std::chrono::minutes NODE_EXPIRE_TIME {5};
+
+ struct Node {
+ time_point last_reply; /* Associates this leaf with its entry in the leaves_ multimap */
+ std::shared_ptr<Node> parent; /* Shared_ptr to the parent; keeps the path to the root alive */
+ std::weak_ptr<Node> left_child; /* Left child, for bit equal to 1 */
+ std::weak_ptr<Node> right_child; /* Right child, for bit equal to 0 */
+ };
+
+ std::weak_ptr<Node> root_; /* Root of the tree */
+
+ /**
+ * This multimap contains all prefixes inserted in the tree, in time order.
+ * We can then delete the oldest ones if there are too many nodes.
+ * The tree self-destroys its branches (thanks to shared_ptr).
+ */
+ std::multimap<time_point, std::shared_ptr<Node>> leaves_;
+ };
+
+ /* Callback used for inserting a value via the pht */
+ using RealInsertCallback = std::function<void(const Prefix& p, IndexEntry entry)>;
+ using LookupCallbackWrapper = std::function<void(std::vector<std::shared_ptr<IndexEntry>>& values, const Prefix& p)>;
+
+ /**
+ * Performs a step in the lookup operation. Each step is performed
+ * asynchronously.
+ *
+ * @param k : Prefix on which the lookup is performed
+ * @param lo : lowest bound on the prefix (where to start)
+ * @param hi : highest bound on the prefix (where to stop)
+ * @param vals : Shared ptr to a vector of IndexEntry (going to contain all values found)
+ * @param cb : Callback to use at the end of the lookupStep (called on the value of vals)
+ * @param done_cb : Callback at the end of the lookupStep
+ * @param max_common_prefix_len: used in the inexact lookup match case, indicates the longest common prefix found
+ * @param start : If start is set then lo and hi will be ignored for the first step; if the step fails lo and hi will be used
+ * @param all_values : If all_values is true then every value met during the lookupStep will be in the vector vals
+ */
+ void lookupStep(Prefix k, std::shared_ptr<int> lo, std::shared_ptr<int> hi,
+ std::shared_ptr<std::vector<std::shared_ptr<IndexEntry>>> vals,
+ LookupCallbackWrapper cb, DoneCallbackSimple done_cb,
+ std::shared_ptr<unsigned> max_common_prefix_len,
+ int start = -1, bool all_values = false);
+
+ /**
+ * Apply the zcurve algorithm on the list of input prefixes
+ *
+ * @param all_prefix : Vector of prefixes to interleave
+ *
+ * @return The output prefix where all flags and content are interleaved
+ */
+ Prefix zcurve(const std::vector<Prefix>& all_prefix) const;
+
+ /**
+ * Linearizes the key into a unidimensional key. A pht only takes
+ * unidimensional keys.
+ *
+ * @param Key The initial key.
+ *
+ * @return the prefix of the linearized key.
+ */
+ virtual Prefix linearize(Key k) const;
+
+ /**
+ * Looks for where to put the data: if there is free space on the node
+ * above then that node will become the real leaf.
+ *
+ * @param p Shared_ptr on the Prefix to check
+ * @param entry The entry to put at the prefix p
+ * @param end_cb Callback to use at the end of counting
+ */
+ void getRealPrefix(const std::shared_ptr<Prefix>& p, IndexEntry entry, RealInsertCallback end_cb );
+
+ /**
+ * NOTE(review): this doc was copied from getRealPrefix; presumably this
+ * re-checks / refreshes the insertion of 'entry' at prefix p over time
+ * (time_p) — confirm against the implementation.
+ *
+ * @param p Prefix to check
+ * @param entry The entry put at the prefix p
+ * @param time_p Timepoint associated with the update
+ */
+ void checkPhtUpdate(Prefix p, IndexEntry entry, time_point time_p);
+
+ /**
+ * Search for the split location by comparing 'compared' to all values in vals.
+ *
+ * @param compared : Value which is going to be compared
+ * @param vals : The vector of values to compare with compared
+ * @return position where compared diverges from all others
+ */
+ static size_t findSplitLocation(const Prefix& compared, const std::vector<std::shared_ptr<IndexEntry>>& vals) {
+ for ( size_t i = 0; i < compared.content_.size() * 8 - 1; i++ )
+ for ( auto const& v : vals)
+ if ( Prefix(v->prefix).isContentBitActive(i) != compared.isContentBitActive(i) )
+ return i + 1;
+ return compared.content_.size() * 8 - 1;
+ }
+
+ /**
+ * Put canary from the split point until the last known canary and add the prefix at the good place
+ *
+ * @param insert : Prefix to insert, but also the prefix used to check where we need to split
+ * @param vals : Vector of vals for the comparison
+ * @param entry : Entry to put on the pht
+ * @param end_cb : Callback to apply to the inserted prefix (here does the insert)
+ */
+ void split(const Prefix& insert, const std::vector<std::shared_ptr<IndexEntry>>& vals, IndexEntry entry, RealInsertCallback end_cb);
+
+ /**
+ * Tells if the key is valid according to the key spec:
+ * same field names, and each field no longer than the spec allows.
+ */
+ bool validKey(const Key& k) const {
+ return k.size() == keySpec_.size() and
+ std::equal(k.begin(), k.end(), keySpec_.begin(),
+ [&](const Key::value_type& key, const KeySpec::value_type& key_spec) {
+ return key.first == key_spec.first and key.second.size() <= key_spec.second;
+ }
+ );
+ }
+
+ /**
+ * Updates the canary token on the node responsible for the specified
+ * Prefix.
+ */
+ void updateCanary(Prefix p);
+
+ const std::string name_; /* index name, prefixed with INDEX_PREFIX */
+ const std::string canary_; /* name_ + ".canary" */
+ const KeySpec keySpec_;
+ Cache cache_;
+ std::shared_ptr<DhtRunner> dht_;
+};
+
+} /* indexation */
+} /* dht */
+
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "def.h"
+#include "rng.h"
+
+#include <msgpack.hpp>
+
+#ifndef _WIN32
+#include <netinet/in.h>
+#include <netdb.h>
+#ifdef __ANDROID__
+typedef uint16_t in_port_t;
+#endif
+#else
+#include <iso646.h>
+#include <ws2tcpip.h>
+typedef uint16_t sa_family_t;
+typedef uint16_t in_port_t;
+#endif
+
+#include <iostream>
+#include <iomanip>
+#include <array>
+#include <vector>
+#include <algorithm>
+#include <stdexcept>
+#include <sstream>
+#include <cstring>
+
+namespace dht {
+
+using byte = uint8_t;
+
+namespace crypto {
+ /* Hash 'data' (data_length bytes) into 'hash' (hash_length bytes); defined elsewhere. */
+ OPENDHT_PUBLIC void hash(const uint8_t* data, size_t data_length, uint8_t* hash, size_t hash_length);
+}
+
+/**
+ * Represents an InfoHash.
+ * An InfoHash is a byte array of HASH_LEN bytes.
+ * InfoHashes identify nodes and values in the Dht.
+ */
+template <size_t N>
+class OPENDHT_PUBLIC Hash {
+public:
+ using T = std::array<uint8_t, N>;
+ typedef typename T::iterator iterator;
+ typedef typename T::const_iterator const_iterator;
+
+ /** Zero-filled hash. */
+ Hash () {
+ data_.fill(0);
+ }
+ /** Copy the first N bytes of h; zero-filled if data_len < N. */
+ Hash (const uint8_t* h, size_t data_len) {
+ if (data_len < N)
+ data_.fill(0);
+ else
+ std::copy_n(h, N, data_.begin());
+ }
+ /**
+ * Constructor from an hexadecimal string (without "0x").
+ * hex must be at least 2.HASH_LEN characters long.
+ * If too long, only the first 2.HASH_LEN characters are read.
+ */
+ explicit Hash(const std::string& hex);
+
+ /** Deserialize from a msgpack object (BIN of exactly N bytes). */
+ Hash(const msgpack::object& o) {
+ msgpack_unpack(o);
+ }
+
+ size_t size() const { return data_.size(); }
+ const uint8_t* data() const { return data_.data(); }
+ uint8_t* data() { return data_.data(); }
+ iterator begin() { return data_.begin(); }
+ const_iterator cbegin() const { return data_.cbegin(); }
+ iterator end() { return data_.end(); }
+ const_iterator cend() const { return data_.cend(); }
+
+ bool operator==(const Hash& h) const {
+ /* Compare the arrays directly: the previous word-wise comparison
+ through reinterpret_cast<const uint32_t*> violated strict aliasing
+ and silently ignored the last N % 4 bytes for sizes not a
+ multiple of 4. */
+ return data_ == h.data_;
+ }
+ bool operator!=(const Hash& h) const { return !(*this == h); }
+
+ /** Lexicographic (big-endian) ordering. */
+ bool operator<(const Hash& o) const {
+ for(unsigned i = 0; i < N; i++) {
+ if(data_[i] != o.data_[i])
+ return data_[i] < o.data_[i];
+ }
+ return false;
+ }
+
+ /** True if any byte is non-zero. */
+ explicit operator bool() const {
+ /* Byte-wise scan; avoids the former uint32_t type-punning
+ (aliasing UB, and wrong for N % 4 != 0). */
+ for (const auto& b : data_) {
+ if (b)
+ return true;
+ }
+ return false;
+ }
+
+ uint8_t& operator[](size_t index) { return data_[index]; }
+ const uint8_t& operator[](size_t index) const { return data_[index]; }
+
+ /**
+ * Find the lowest 1 bit in an id.
+ * Result will always be lower than 8*N; -1 if the hash is all zero.
+ */
+ inline int lowbit() const {
+ int i, j;
+ /* last non-zero byte */
+ for(i = N-1; i >= 0; i--)
+ if(data_[i] != 0)
+ break;
+ if(i < 0)
+ return -1;
+ /* least significant set bit of that byte (MSB-first numbering) */
+ for(j = 7; j >= 0; j--)
+ if((data_[i] & (0x80 >> j)) != 0)
+ break;
+ return 8 * i + j;
+ }
+
+ /**
+ * Forget about the ``XOR-metric''. An id is just a path from the
+ * root of the tree, so bits are numbered from the start.
+ */
+ static inline int cmp(const Hash& id1, const Hash& id2) {
+ return std::memcmp(id1.data_.data(), id2.data_.data(), N);
+ }
+
+ /** Find how many bits two ids have in common. */
+ static inline unsigned
+ commonBits(const Hash& id1, const Hash& id2)
+ {
+ unsigned i, j;
+ uint8_t x;
+ for(i = 0; i < N; i++) {
+ if(id1.data_[i] != id2.data_[i])
+ break;
+ }
+
+ if(i == N)
+ return 8*N;
+
+ x = id1.data_[i] ^ id2.data_[i];
+
+ j = 0;
+ while((x & 0x80) == 0) {
+ x <<= 1;
+ j++;
+ }
+
+ return 8 * i + j;
+ }
+
+ /** Determine whether id1 or id2 is closer to this (XOR metric):
+ * -1 if id1 is closer, 1 if id2 is closer, 0 if equal. */
+ int
+ xorCmp(const Hash& id1, const Hash& id2) const
+ {
+ for(unsigned i = 0; i < N; i++) {
+ uint8_t xor1, xor2;
+ if(id1.data_[i] == id2.data_[i])
+ continue;
+ xor1 = id1.data_[i] ^ data_[i];
+ xor2 = id2.data_[i] ^ data_[i];
+ if(xor1 < xor2)
+ return -1;
+ else
+ return 1;
+ }
+ return 0;
+ }
+
+ /** Read bit 'nbit' (MSB of byte 0 is bit 0). */
+ bool
+ getBit(unsigned nbit) const
+ {
+ auto& num = *(data_.cbegin()+(nbit/8));
+ unsigned bit = 7 - (nbit % 8);
+ return (num >> bit) & 1;
+ }
+
+ /** Set bit 'nbit' to b (branch-free conditional bit assignment). */
+ void
+ setBit(unsigned nbit, bool b)
+ {
+ auto& num = data_[nbit/8];
+ unsigned bit = 7 - (nbit % 8);
+ num ^= (-b ^ num) & (1 << bit);
+ }
+
+ /** Map the first bytes of the hash to a double in [0, 1). */
+ double toFloat() const {
+ using D = size_t;
+ double v = 0.;
+ for (size_t i = 0; i < std::min<size_t>(N, sizeof(D)-1); i++)
+ v += *(data_.cbegin()+i) / (double)((D)1 << 8*(i+1));
+ return v;
+ }
+
+ static inline Hash get(const std::string& data) {
+ return get((const uint8_t*)data.data(), data.size());
+ }
+
+ static inline Hash get(const std::vector<uint8_t>& data) {
+ return get(data.data(), data.size());
+ }
+
+ /**
+ * Computes the hash from a given data buffer of size data_len.
+ */
+ static Hash get(const uint8_t* data, size_t data_len)
+ {
+ Hash ret;
+ crypto::hash(data, data_len, ret.data(), N);
+ return ret;
+ }
+
+ static Hash getRandom();
+
+ template <size_t M>
+ OPENDHT_PUBLIC friend std::ostream& operator<< (std::ostream& s, const Hash<M>& h);
+
+ template <size_t M>
+ OPENDHT_PUBLIC friend std::istream& operator>> (std::istream& s, Hash<M>& h);
+
+ /** Hex representation in a thread-local buffer (valid until next call). */
+ const char* to_c_str() const;
+
+ std::string toString() const;
+
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const
+ {
+ pk.pack_bin(N);
+ pk.pack_bin_body((char*)data_.data(), N);
+ }
+
+ void msgpack_unpack(msgpack::object o) {
+ if (o.type != msgpack::type::BIN or o.via.bin.size != N)
+ throw msgpack::type_error();
+ std::copy_n(o.via.bin.ptr, N, data_.data());
+ }
+private:
+ T data_;
+ /* Parse 2*N hex chars into data_; zero-fills on invalid input. */
+ void fromString(const char*);
+};
+
+#define HASH_LEN 20u
+/* 160-bit hash identifying nodes and values */
+using InfoHash = Hash<HASH_LEN>;
+/* 256-bit hash */
+using h256 = Hash<32>;
+using PkId = h256;
+
+/* Write the hash as its 2*N-character hexadecimal representation. */
+template <size_t N>
+std::ostream& operator<< (std::ostream& s, const Hash<N>& h)
+{
+ s.write(h.to_c_str(), N*2);
+ return s;
+}
+
+/* Read a hash from 2*N hexadecimal characters; enables eof/fail exceptions
+ on the stream while reading. */
+template <size_t N>
+std::istream& operator>> (std::istream& s, Hash<N>& h)
+{
+ /* Use the template parameter for the size: h.size() is not a constant
+ expression usable as an std::array extent. */
+ std::array<char, N*2> dat;
+ s.exceptions(std::istream::eofbit | std::istream::failbit);
+ s.read(&(*dat.begin()), dat.size());
+ /* fromString is a private member: it must be called on 'h' (this
+ operator is a friend of Hash); the unqualified call did not name it. */
+ h.fromString(dat.data());
+ return s;
+}
+
+/* Construct from a hex string: requires at least 2*N hex characters,
+ otherwise the hash is zero-filled. */
+template <size_t N>
+Hash<N>::Hash(const std::string& hex) {
+ if (hex.size() < 2*N)
+ data_.fill(0);
+ else
+ fromString(hex.c_str());
+}
+
+/* Parse 2*N hexadecimal characters into data_. Any invalid character
+ resets the whole hash to zero. */
+template <size_t N>
+void
+Hash<N>::fromString(const char* in) {
+ /* Decode a single hex digit; throws domain_error on anything else. */
+ auto digit = [](char c) -> uint8_t {
+ if (c >= '0' and c <= '9') return c - '0';
+ if (c >= 'a' and c <= 'f') return 10 + c - 'a';
+ if (c >= 'A' and c <= 'F') return 10 + c - 'A';
+ throw std::domain_error("not an hex character");
+ };
+ try {
+ for (size_t i = 0; i < N; ++i)
+ data_[i] = (digit(in[2*i]) << 4) | digit(in[2*i+1]);
+ } catch (const std::domain_error&) {
+ data_.fill(0);
+ }
+}
+
+/* Generate a uniformly random hash using the crypto random device.
+ NOTE(review): fills the array word-by-word through reinterpret_cast,
+ which assumes N is a multiple of 4 (true for the instantiations used:
+ 20 and 32) — confirm before adding other sizes. */
+template <size_t N>
+Hash<N>
+Hash<N>::getRandom()
+{
+ Hash h;
+ crypto::random_device rdev;
+ std::uniform_int_distribution<uint32_t> rand_int;
+ auto a = reinterpret_cast<uint32_t*>(h.data());
+ auto b = reinterpret_cast<uint32_t*>(h.data() + h.size());
+ std::generate(a, b, std::bind(rand_int, std::ref(rdev)));
+ return h;
+}
+
+/* Lookup table mapping each byte value to its two lowercase hex digits. */
+struct HexMap : public std::array<std::array<char, 2>, 256> {
+ HexMap() {
+ size_t i = 0;
+ for (auto& entry : *this) {
+ entry[0] = hex_digits[i >> 4];
+ entry[1] = hex_digits[i & 0x0F];
+ ++i;
+ }
+ }
+private:
+ static constexpr const char* hex_digits = "0123456789abcdef";
+};
+
+OPENDHT_PUBLIC extern const HexMap hex_map;
+
+/* Render the hash as a NUL-terminated hex string in a thread-local buffer.
+ The returned pointer is valid until the next call on the same thread.
+ (buf is zero-initialized, so the final NUL byte is always in place.) */
+template <size_t N>
+const char*
+Hash<N>::to_c_str() const
+{
+ thread_local std::array<char, N*2+1> buf;
+ for (size_t i=0; i<N; i++) {
+ const auto& m = hex_map[data_[i]];
+ /* Copy the two digits individually: the previous uint16_t type-pun
+ performed unaligned, strict-aliasing-violating writes. */
+ buf[i*2] = m[0];
+ buf[i*2+1] = m[1];
+ }
+ return buf.data();
+}
+
+/* Hex representation of the hash as a std::string (2*N characters). */
+template <size_t N>
+std::string
+Hash<N>::toString() const
+{
+ return std::string(to_c_str(), N*2);
+}
+
+/* All-zero InfoHash constant (namespace-scope const: internal linkage). */
+const InfoHash zeroes {};
+
+/**
+ * Exported view of a known node: its id and socket address,
+ * serializable with msgpack (used to persist/restore the routing table).
+ */
+struct OPENDHT_PUBLIC NodeExport {
+ InfoHash id;
+ sockaddr_storage ss; /* node address; only the first sslen bytes are valid */
+ socklen_t sslen;
+
+ /* Pack as a 2-entry map: "id" -> hash, "addr" -> raw sockaddr bytes. */
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const
+ {
+ pk.pack_map(2);
+ pk.pack(std::string("id"));
+ pk.pack(id);
+ pk.pack(std::string("addr"));
+ pk.pack_bin(sslen);
+ pk.pack_bin_body((char*)&ss, sslen);
+ }
+
+ void msgpack_unpack(msgpack::object o);
+
+ OPENDHT_PUBLIC friend std::ostream& operator<< (std::ostream& s, const NodeExport& h);
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "dhtrunner.h"
+
+#include <iostream>
+
+namespace dht {
+
+/**
+ * Logging-related functions
+ */
+namespace log {
+
+/**
+ * Terminal colors for logging
+ */
+namespace Color {
+ /* ANSI SGR color codes (30-39 foreground, 40-49 background). */
+ enum Code {
+ FG_RED = 31,
+ FG_GREEN = 32,
+ FG_YELLOW = 33,
+ FG_BLUE = 34,
+ FG_DEFAULT = 39,
+ BG_RED = 41,
+ BG_GREEN = 42,
+ BG_BLUE = 44,
+ BG_DEFAULT = 49
+ };
+ /* Stream manipulator emitting the escape sequence for one color code. */
+ class Modifier {
+ const Code code;
+ public:
+ constexpr Modifier(Code pCode) : code(pCode) {}
+ friend std::ostream&
+ operator<<(std::ostream& os, const Modifier& mod) {
+ /* ESC [ <code> m */
+ return os << "\033[" << mod.code << 'm';
+ }
+ };
+}
+
+/* Ready-made color manipulators for log output. */
+constexpr const Color::Modifier def(Color::FG_DEFAULT);
+constexpr const Color::Modifier red(Color::FG_RED);
+constexpr const Color::Modifier yellow(Color::FG_YELLOW);
+
+/**
+ * Print va_list to std::ostream (used for logging).
+ */
+OPENDHT_PUBLIC void
+printLog(std::ostream &s, char const *m, va_list args);
+
+/* Attach colored console loggers to the runner. */
+OPENDHT_PUBLIC void
+enableLogging(dht::DhtRunner &dht);
+
+/* Log to the file at 'path' instead of the console. */
+OPENDHT_PUBLIC void
+enableFileLogging(dht::DhtRunner &dht, const std::string &path);
+
+/* Remove any logger previously attached to the runner. */
+OPENDHT_PUBLIC void
+disableLogging(dht::DhtRunner &dht);
+
+/* Log through syslog under the given identifier. */
+OPENDHT_PUBLIC void
+enableSyslog(dht::DhtRunner &dht, const char* name);
+
+} /* log */
+} /* dht */
--- /dev/null
+/*
+ * Copyright (C) 2016 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "infohash.h"
+
+#ifndef OPENDHT_LOG
+#define OPENDHT_LOG true
+#endif
+
+namespace dht {
+
+// Logging related utility functions
+
+/**
+ * Dummy log function used to disable logging:
+ * accepts and ignores any message.
+ */
+inline void NOLOG(char const*, va_list) {}
+
+/**
+ * Wrapper for logging methods: holds a std::function taking a printf-style
+ * format and a va_list, and forwards variadic calls to it.
+ */
+struct LogMethod {
+ LogMethod() = default;
+
+ LogMethod(LogMethod&& l) : func(std::move(l.func)) {}
+ LogMethod(const LogMethod& l) : func(l.func) {}
+
+ LogMethod& operator=(dht::LogMethod&& l) {
+ /* Move the stored std::function directly. The previous
+ std::forward<LogMethod>(l.func) forwarded the std::function
+ member as a LogMethod — the wrong type, routed through the
+ converting constructor instead of a plain move. */
+ func = std::move(l.func);
+ return *this;
+ }
+ LogMethod& operator=(const dht::LogMethod& l) {
+ func = l.func;
+ return *this;
+ }
+
+ template<typename T>
+ explicit LogMethod(T&& t) : func(std::forward<T>(t)) {}
+
+ template<typename T>
+ LogMethod(const T& t) : func(t) {}
+
+ /* Variadic entry point: packs the arguments into a va_list for func. */
+ void operator()(char const* format, ...) const {
+ va_list args;
+ va_start(args, format);
+ func(format, args);
+ va_end(args);
+ }
+ /* Direct va_list entry point. */
+ void log(char const* format, va_list args) const {
+ func(format, args);
+ }
+ /* True when a log function is set. */
+ explicit operator bool() const {
+ return (bool)func;
+ }
+
+ /* Log a raw buffer, replacing non-printable bytes with '.'. */
+ void logPrintable(const uint8_t *buf, size_t buflen) const {
+ std::string buf_clean(buflen, '\0');
+ for (size_t i=0; i<buflen; i++)
+ buf_clean[i] = isprint(buf[i]) ? buf[i] : '.';
+ (*this)("%s", buf_clean.c_str());
+ }
+private:
+ std::function<void(char const*, va_list)> func;
+};
+
+/**
+ * Logger with three severities (DBG, WARN, ERR) and an optional InfoHash
+ * filter: when a filter is set, only messages tagged with that hash pass.
+ * All logging compiles out when OPENDHT_LOG is false.
+ */
+struct Logger {
+ LogMethod DBG = NOLOG;
+ LogMethod WARN = NOLOG;
+ LogMethod ERR = NOLOG;
+ /* Restrict output to messages concerning hash f (a zero hash disables
+ filtering, since it converts to false). */
+ void setFilter(const InfoHash& f) {
+ filter_ = f;
+ filterEnable_ = static_cast<bool>(filter_);
+ }
+ /* Untagged message: suppressed entirely while a filter is enabled. */
+ inline void log0(const LogMethod& logger, char const* format, va_list args) const {
+#if OPENDHT_LOG
+ if (logger and not filterEnable_)
+ logger.log(format, args);
+#endif
+ }
+ /* Message tagged with one hash: passes if unfiltered or f matches. */
+ inline void log1(const LogMethod& logger, const InfoHash& f, char const* format, va_list args) const {
+#if OPENDHT_LOG
+ if (logger and (not filterEnable_ or f == filter_))
+ logger.log(format, args);
+#endif
+ }
+ /* Message tagged with two hashes: passes if unfiltered or either matches. */
+ inline void log2(const LogMethod& logger, const InfoHash& f1, const InfoHash& f2, char const* format, va_list args) const {
+#if OPENDHT_LOG
+ if (logger and (not filterEnable_ or f1 == filter_ or f2 == filter_))
+ logger.log(format, args);
+#endif
+ }
+ /* Debug-level variadic front-ends. */
+ inline void d(char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log0(DBG, format, args);
+ va_end(args);
+#endif
+ }
+ inline void d(const InfoHash& f, char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log1(DBG, f, format, args);
+ va_end(args);
+#endif
+ }
+ inline void d(const InfoHash& f1, const InfoHash& f2, char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log2(DBG, f1, f2, format, args);
+ va_end(args);
+#endif
+ }
+ /* Warning-level variadic front-ends. */
+ inline void w(char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log0(WARN, format, args);
+ va_end(args);
+#endif
+ }
+ inline void w(const InfoHash& f, char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log1(WARN, f, format, args);
+ va_end(args);
+#endif
+ }
+ inline void w(const InfoHash& f1, const InfoHash& f2, char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log2(WARN, f1, f2, format, args);
+ va_end(args);
+#endif
+ }
+ /* Error-level variadic front-ends. */
+ inline void e(char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log0(ERR, format, args);
+ va_end(args);
+#endif
+ }
+ inline void e(const InfoHash& f, char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log1(ERR, f, format, args);
+ va_end(args);
+#endif
+ }
+ inline void e(const InfoHash& f1, const InfoHash& f2, char const* format, ...) const {
+#if OPENDHT_LOG
+ va_list args;
+ va_start(args, format);
+ log2(ERR, f1, f2, format, args);
+ va_end(args);
+#endif
+ }
+private:
+ bool filterEnable_ {false};
+ InfoHash filter_ {};
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2018 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "node_cache.h"
+#include "value.h"
+#include "infohash.h"
+#include "node.h"
+#include "scheduler.h"
+#include "utils.h"
+#include "rng.h"
+#include "rate_limiter.h"
+
+#include <vector>
+#include <string>
+#include <functional>
+#include <algorithm>
+#include <memory>
+#include <queue>
+
+namespace dht {
+namespace net {
+
+struct Request;
+struct Socket;
+struct TransId;
+
+#ifndef MSG_CONFIRM
+#define MSG_CONFIRM 0
+#endif
+
+/**
+ * Protocol-level error, carrying an http-like status code, a message and
+ * the id of the node that caused the failure.
+ */
+class DhtProtocolException : public DhtException {
+public:
+ // sent to another peer (http-like).
+ static const constexpr uint16_t NON_AUTHORITATIVE_INFORMATION {203}; /* incomplete request packet. */
+ static const constexpr uint16_t UNAUTHORIZED {401}; /* wrong tokens. */
+ static const constexpr uint16_t NOT_FOUND {404}; /* storage not found */
+ // for internal use (custom).
+ static const constexpr uint16_t INVALID_TID_SIZE {421}; /* id was truncated. */
+ static const constexpr uint16_t UNKNOWN_TID {422}; /* unknown tid */
+ static const constexpr uint16_t WRONG_NODE_INFO_BUF_LEN {423}; /* node info length is wrong */
+
+ /* Standard error message strings (defined elsewhere). */
+ static const std::string GET_NO_INFOHASH; /* received "get" request with no infohash */
+ static const std::string LISTEN_NO_INFOHASH; /* got "listen" request without infohash */
+ static const std::string LISTEN_WRONG_TOKEN; /* wrong token in "listen" request */
+ static const std::string PUT_NO_INFOHASH; /* no infohash in "put" request */
+ static const std::string PUT_WRONG_TOKEN; /* got "put" request with wrong token */
+ static const std::string STORAGE_NOT_FOUND; /* got access request for an unknown storage */
+ static const std::string PUT_INVALID_ID; /* invalid id in "put" request */
+
+ DhtProtocolException(uint16_t code, const std::string& msg="", InfoHash failing_node_id={})
+ : DhtException(msg), msg(msg), code(code), failing_node_id(failing_node_id) {}
+
+ std::string getMsg() const { return msg; }
+ uint16_t getCode() const { return code; }
+ const InfoHash getNodeId() const { return failing_node_id; }
+
+private:
+ std::string msg;
+ uint16_t code;
+ const InfoHash failing_node_id;
+};
+
+struct ParsedMessage;
+
+/**
+ * Answer for a request: token, values, nodes and related ids collected
+ * from a peer's reply.
+ */
+struct RequestAnswer {
+ Blob ntoken {}; /* security token returned by the node */
+ Value::Id vid {}; /* value id this answer refers to */
+ std::vector<Sp<Value>> values {}; /* values carried by the reply */
+ std::vector<Value::Id> refreshed_values {};
+ std::vector<Value::Id> expired_values {};
+ std::vector<Sp<FieldValueIndex>> fields {};
+ std::vector<Sp<Node>> nodes4 {}; /* IPv4 nodes from the reply */
+ std::vector<Sp<Node>> nodes6 {}; /* IPv6 nodes from the reply */
+ RequestAnswer() {}
+ /* Built from a parsed network message (defined elsewhere). */
+ RequestAnswer(ParsedMessage&& msg);
+};
+
+/*!
+ * @class NetworkEngine
+ * @brief An abstraction of communication protocol on the network.
+ * @details
+ * The NetworkEngine processes all requests to nodes by offering a public
+ * interface for handling sending and receiving packets. The following
+ * parameters specify callbacks for DHT work:
+ *
+ * @param onError callback for handling error messages.
+ * @param onNewNode callback for handling new nodes.
+ * @param onReportedAddr callback for reporting our address as seen from the other peer.
+ * @param onPing callback for ping request.
+ * @param onFindNode callback for "find node" request.
+ * @param onGetValues callback for "get values" request.
+ * @param onListen callback for "listen" request.
+ * @param onAnnounce callback for "announce" request.
+ * @param onRefresh callback for "refresh" request.
+ */
+class NetworkEngine final
+{
+private:
+ /**
+ * Called when we receive an error message.
+ */
+ std::function<void(Sp<Request>, DhtProtocolException)> onError;
+
+ /**
+ * Called for every packets received for handling new nodes contacting us.
+ *
+ * @param node: the node
+ * @param confirm: 1 if the node sent a message, 2 if it sent us a reply.
+ */
+ std::function<void(const Sp<Node>&, int)> onNewNode;
+ /**
+ * Called when an address is reported from a requested node.
+ *
+ * @param h: id
+ * @param saddr_len (type: socklen_t) length of the sockaddr struct.
+ */
+ std::function<void(const InfoHash&, const SockAddr&)> onReportedAddr;
+ /**
+ * Called on ping reception.
+ *
+ * @param node (type: Sp<Node>) the requesting node.
+ */
+ std::function<RequestAnswer(Sp<Node>)> onPing {};
+ /**
+ * Called on find node request.
+ *
+ * @param node (type: Sp<Node>) the requesting node.
+ * @param h (type: InfoHash) hash of the value of interest.
+ * @param want (type: want_t) states if nodes sent in the response are ipv4
+ * or ipv6.
+ */
+ std::function<RequestAnswer(Sp<Node>, const InfoHash&, want_t)> onFindNode {};
+ /**
+ * Called on "get values" request.
+ *
+ * @param node (type: Sp<Node>) the requesting node.
+ * @param h (type: InfoHash) hash of the value of interest.
+ * @param want (type: want_t) states if nodes sent in the response are ipv4
+ * or ipv6.
+ */
+ std::function<RequestAnswer(Sp<Node>, const InfoHash&, want_t, const Query&)> onGetValues {};
+ /**
+ * Called on listen request.
+ *
+ * @param node (type: Sp<Node>) the requesting node.
+ * @param h (type: InfoHash) hash of the value of interest.
+ * @param token (type: Blob) security token.
+ * @param rid (type: uint16_t) request id.
+ */
+ std::function<RequestAnswer(Sp<Node>,
+ const InfoHash&,
+ const Blob&,
+ Tid,
+ const Query&)> onListen {};
+ /**
+ * Called on announce request.
+ *
+ * @param node (type: Sp<Node>) the requesting node.
+ * @param h (type: InfoHash) hash of the value of interest.
+ * @param token (type: Blob) security token.
+ * @param values (type: std::vector<Sp<Value>>) values to store.
+ * @param created (type: time_point) time when the value was created.
+ */
+ std::function<RequestAnswer(Sp<Node>,
+ const InfoHash&,
+ const Blob&,
+ const std::vector<Sp<Value>>&,
+ const time_point&)> onAnnounce {};
+ /**
+ * Called on refresh request.
+ *
+ * @param node (type: Sp<Node>) the requesting node.
+ * @param h (type: InfoHash) hash of the value of interest.
+ * @param token (type: Blob) security token.
+ * @param vid (type: Value::id) the value id.
+ */
+ std::function<RequestAnswer(Sp<Node>,
+ const InfoHash&,
+ const Blob&,
+ const Value::Id&)> onRefresh {};
+
+public:
+ using RequestCb = std::function<void(const Request&, RequestAnswer&&)>;
+ using RequestExpiredCb = std::function<void(const Request&, bool)>;
+
+ NetworkEngine(Logger& log, Scheduler& scheduler, const int& s = -1, const int& s6 = -1);
+ NetworkEngine(InfoHash& myid, NetId net, const int& s, const int& s6, Logger& log, Scheduler& scheduler,
+ decltype(NetworkEngine::onError) onError,
+ decltype(NetworkEngine::onNewNode) onNewNode,
+ decltype(NetworkEngine::onReportedAddr) onReportedAddr,
+ decltype(NetworkEngine::onPing) onPing,
+ decltype(NetworkEngine::onFindNode) onFindNode,
+ decltype(NetworkEngine::onGetValues) onGetValues,
+ decltype(NetworkEngine::onListen) onListen,
+ decltype(NetworkEngine::onAnnounce) onAnnounce,
+ decltype(NetworkEngine::onRefresh) onRefresh);
+
+ virtual ~NetworkEngine();
+
+ void clear();
+
+ /**
+ * Sends values (with closest nodes) to a listenner.
+ *
+ * @param sa The address of the listenner.
+ * @param sslen The length of the sockaddr structure.
+ * @param socket_id The tid to use to write to the request socket.
+ * @param hash The hash key of the value.
+ * @param want Wether to send ipv4 and/or ipv6 nodes.
+ * @param ntoken Listen security token.
+ * @param nodes The ipv4 closest nodes.
+ * @param nodes6 The ipv6 closest nodes.
+ * @param values The values to send.
+ */
+ void tellListener(Sp<Node> n, Tid socket_id, const InfoHash& hash, want_t want, const Blob& ntoken,
+ std::vector<Sp<Node>>&& nodes, std::vector<Sp<Node>>&& nodes6,
+ std::vector<Sp<Value>>&& values, const Query& q);
+
+ void tellListenerRefreshed(Sp<Node> n, Tid socket_id, const InfoHash& hash, const Blob& ntoken, const std::vector<Value::Id>& values);
+ void tellListenerExpired(Sp<Node> n, Tid socket_id, const InfoHash& hash, const Blob& ntoken, const std::vector<Value::Id>& values);
+
+ bool isRunning(sa_family_t af) const;
+ inline want_t want () const { return dht_socket >= 0 && dht_socket6 >= 0 ? (WANT4 | WANT6) : -1; }
+
+ void connectivityChanged(sa_family_t);
+
+ /**************
+ * Requests *
+ **************/
+
+ /**
+ * Send a "ping" request to a given node.
+ *
+ * @param n The node.
+ * @param on_done Request callback when the request is completed.
+ * @param on_expired Request callback when the request expires.
+ *
+ * @return the request with information concerning its success.
+ */
+ Sp<Request>
+ sendPing(Sp<Node> n, RequestCb&& on_done, RequestExpiredCb&& on_expired);
+ /**
+ * Send a "ping" request to a given node.
+ *
+ * @param sa The node's ip sockaddr info.
+ * @param salen The associated sockaddr struct length.
+ * @param on_done Request callback when the request is completed.
+ * @param on_expired Request callback when the request expires.
+ *
+ * @return the request with information concerning its success.
+ */
+ Sp<Request>
+ sendPing(const sockaddr* sa, socklen_t salen, RequestCb&& on_done, RequestExpiredCb&& on_expired) {
+ return sendPing(std::make_shared<Node>(zeroes, sa, salen),
+ std::forward<RequestCb>(on_done),
+ std::forward<RequestExpiredCb>(on_expired));
+ }
+ /**
+ * Send a "find node" request to a given node.
+ *
+ * @param n The node.
+ * @param target The target hash.
+ * @param want Indicating wether IPv4 or IPv6 are wanted in response.
+ * Use NetworkEngine::want()
+ * @param on_done Request callback when the request is completed.
+ * @param on_expired Request callback when the request expires.
+ *
+ * @return the request with information concerning its success.
+ */
+ Sp<Request> sendFindNode(Sp<Node> n,
+ const InfoHash& hash,
+ want_t want = -1,
+ RequestCb&& on_done = {},
+ RequestExpiredCb&& on_expired = {});
+ /**
+ * Send a "get" request to a given node.
+ *
+ * @param n The node.
+ * @param hash The target hash.
+ * @param query The query describing filters.
+ * @param token A security token.
+ * @param want Indicating wether IPv4 or IPv6 are wanted in response.
+ * Use NetworkEngine::want()
+ * @param on_done Request callback when the request is completed.
+ * @param on_expired Request callback when the request expires.
+ *
+ * @return the request with information concerning its success.
+ */
+ Sp<Request> sendGetValues(Sp<Node> n,
+ const InfoHash& hash,
+ const Query& query,
+ want_t want,
+ RequestCb&& on_done,
+ RequestExpiredCb&& on_expired);
+ /**
+ * Send a "listen" request to a given node.
+ *
+ * @param n The node.
+ * @param hash The storage's hash.
+ * @param query The query describing filters.
+ * @param token A security token.
+ * @param previous The previous request "listen" sent to this node.
+ * @param socket **UNUSED** The socket for further response.
+ *
+ * For backward compatibility purpose, sendListen has to
+ * handle creation of the socket. Therefor, you cannot
+ * use openSocket yourself. TODO: Once we don't support
+ * the old "listen" negociation, sendListen shall not
+ * create the socket itself.
+ *
+ * @param on_done Request callback when the request is completed.
+ * @param on_expired Request callback when the request expires.
+ * @param socket_cb Callback to execute each time new updates arrive on
+ * the socket.
+ *
+ * @return the request with information concerning its success.
+ */
+ Sp<Request> sendListen(Sp<Node> n,
+ const InfoHash& hash,
+ const Query& query,
+ const Blob& token,
+ Sp<Request> previous,
+ RequestCb&& on_done,
+ RequestExpiredCb&& on_expired,
+ SocketCb&& socket_cb);
+ /**
+ * Send a "announce" request to a given node.
+ *
+ * @param n The node.
+ * @param hash The target hash.
+ * @param created The time when the value was created (avoiding extended
+ * value lifetime)
+ * @param token A security token.
+ * @param on_done Request callback when the request is completed.
+ * @param on_expired Request callback when the request expires.
+ *
+ * @return the request with information concerning its success.
+ */
+ Sp<Request> sendAnnounceValue(Sp<Node> n,
+ const InfoHash& hash,
+ const Sp<Value>& v,
+ time_point created,
+ const Blob& token,
+ RequestCb&& on_done,
+ RequestExpiredCb&& on_expired);
+ /**
+ * Send a "refresh" request to a given node. Asks a node to keep the
+ * associated value Value.type.expiration more minutes in its storage.
+ *
+ * @param n The node.
+ * @param hash The target hash.
+ * @param vid The value id.
+ * @param token A security token.
+ * @param on_done Request callback when the request is completed.
+ * @param on_expired Request callback when the request expires.
+ *
+ * @return the request with information concerning its success.
+ */
+ Sp<Request> sendRefreshValue(Sp<Node> n,
+ const InfoHash& hash,
+ const Value::Id& vid,
+ const Blob& token,
+ RequestCb&& on_done,
+ RequestExpiredCb&& on_expired);
+
+ /**
+ * Parses a message and calls appropriate callbacks.
+ *
+ * @param buf The buffer containing the binary message.
+ * @param buflen The length of the buffer.
+ * @param from The address info of the sender.
+ * @param fromlen The length of the corresponding sockaddr structure.
+ * @param now The time to adjust the clock in the network engine.
+ */
+ void processMessage(const uint8_t *buf, size_t buflen, const SockAddr& addr);
+
+ Sp<Node> insertNode(const InfoHash& myid, const SockAddr& addr) {
+ auto n = cache.getNode(myid, addr, scheduler.time(), 0);
+ onNewNode(n, 0);
+ return n;
+ }
+
+ std::vector<unsigned> getNodeMessageStats(bool in) {
+ auto& st = in ? in_stats : out_stats;
+ std::vector<unsigned> stats {st.ping, st.find, st.get, st.listen, st.put};
+ st = {};
+ return stats;
+ }
+
+ void blacklistNode(const Sp<Node>& n);
+
+ std::vector<Sp<Node>> getCachedNodes(const InfoHash& id, sa_family_t sa_f, size_t count) {
+ return cache.getCachedNodes(id, sa_f, count);
+ }
+
+private:
+
+ struct PartialMessage;
+
+ /***************
+ * Constants *
+ ***************/
+ static constexpr size_t MAX_REQUESTS_PER_SEC {1600};
+ /* the length of a node info buffer in ipv4 format */
+ static const constexpr size_t NODE4_INFO_BUF_LEN {HASH_LEN + sizeof(in_addr) + sizeof(in_port_t)};
+ /* the length of a node info buffer in ipv6 format */
+ static const constexpr size_t NODE6_INFO_BUF_LEN {HASH_LEN + sizeof(in6_addr) + sizeof(in_port_t)};
+ /* after a UDP reply, the period during which we tell the link layer about it */
+ static constexpr std::chrono::seconds UDP_REPLY_TIME {15};
+
+ /* Max. time to receive a full fragmented packet */
+ static constexpr std::chrono::seconds RX_MAX_PACKET_TIME {10};
+ /* Max. time between packet fragments */
+ static constexpr std::chrono::seconds RX_TIMEOUT {3};
+ /* The maximum number of nodes that we snub. There is probably little
+ reason to increase this value. */
+ static constexpr unsigned BLACKLISTED_MAX {10};
+
+ static constexpr size_t MTU {1280};
+ static constexpr size_t MAX_PACKET_VALUE_SIZE {600};
+
+ static const std::string my_v;
+
+ void process(std::unique_ptr<ParsedMessage>&&, const SockAddr& from);
+
+ bool rateLimit(const SockAddr& addr);
+
+ static bool isMartian(const SockAddr& addr);
+ bool isNodeBlacklisted(const SockAddr& addr) const;
+
+ void requestStep(Sp<Request> req);
+
+ /**
+ * Sends a request to a node. Request::MAX_ATTEMPT_COUNT attempts will
+ * be made before the request expires.
+ */
+ void sendRequest(const Sp<Request>& request);
+
+ struct MessageStats {
+ unsigned ping {0};
+ unsigned find {0};
+ unsigned get {0};
+ unsigned put {0};
+ unsigned listen {0};
+ unsigned refresh {0};
+ };
+
+
+ // basic wrapper for socket sendto function
+ int send(const char *buf, size_t len, int flags, const SockAddr& addr);
+
+ void sendValueParts(const TransId& tid, const std::vector<Blob>& svals, const SockAddr& addr);
+ std::vector<Blob> packValueHeader(msgpack::sbuffer&, const std::vector<Sp<Value>>&);
+ void maintainRxBuffer(Tid tid);
+
+ /*************
+ * Answers *
+ *************/
+ /* answer to a ping request */
+ void sendPong(const SockAddr& addr, Tid tid);
+ /* answer to findnodes/getvalues request */
+ void sendNodesValues(const SockAddr& addr,
+ Tid tid,
+ const Blob& nodes,
+ const Blob& nodes6,
+ const std::vector<Sp<Value>>& st,
+ const Query& query,
+ const Blob& token);
+ Blob bufferNodes(sa_family_t af, const InfoHash& id, std::vector<Sp<Node>>& nodes);
+
+ std::pair<Blob, Blob> bufferNodes(sa_family_t af,
+ const InfoHash& id,
+ want_t want,
+ std::vector<Sp<Node>>& nodes,
+ std::vector<Sp<Node>>& nodes6);
+ /* answer to a listen request */
+ void sendListenConfirmation(const SockAddr& addr, Tid tid);
+ /* answer to put request */
+ void sendValueAnnounced(const SockAddr& addr, Tid, Value::Id);
+ /* answer in case of error */
+ void sendError(const SockAddr& addr,
+ Tid tid,
+ uint16_t code,
+ const std::string& message,
+ bool include_id=false);
+
+ void deserializeNodes(ParsedMessage& msg, const SockAddr& from);
+
+ /* DHT info */
+ const InfoHash& myid;
+ const NetId network {0};
+ const int& dht_socket;
+ const int& dht_socket6;
+ const Logger& DHT_LOG;
+
+ NodeCache cache {};
+
+ // global limiting should be triggered by at least 8 different IPs
+ using IpLimiter = RateLimiter<MAX_REQUESTS_PER_SEC/8>;
+ using IpLimiterMap = std::map<SockAddr, IpLimiter, SockAddr::ipCmp>;
+ IpLimiterMap address_rate_limiter {};
+ RateLimiter<MAX_REQUESTS_PER_SEC> rate_limiter {};
+ size_t limiter_maintenance {0};
+
+ // requests handling
+ std::map<Tid, Sp<Request>> requests {};
+ std::map<Tid, PartialMessage> partial_messages;
+
+ MessageStats in_stats {}, out_stats {};
+ std::set<SockAddr> blacklist {};
+
+ Scheduler& scheduler;
+
+ bool logIncoming_ {false};
+};
+
+} /* namespace net */
+} /* namespace dht */
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h" // includes socket structures
+#include "utils.h"
+#include "sockaddr.h"
+
+#include <list>
+#include <map>
+
+namespace dht {
+
+struct Node;
+namespace net {
+struct Request;
+struct Socket;
+struct RequestAnswer;
+} /* namespace net */
+
+using Tid = uint32_t;
+using SocketCb = std::function<void(const Sp<Node>&, net::RequestAnswer&&)>;
+/**
+ * A lightweight virtual socket: holds the callback invoked when further
+ * updates arrive for a previously sent request (see Node::openSocket).
+ */
+struct Socket {
+ Socket() {}
+ Socket(SocketCb&& on_receive) :
+ on_receive(std::move(on_receive)) {}
+ SocketCb on_receive {}; // invoked for each incoming update; empty by default
+};
+
+struct Node {
+ const InfoHash id;
+
+ Node(const InfoHash& id, const SockAddr& addr, bool client=false);
+ Node(const InfoHash& id, const sockaddr* sa, socklen_t salen)
+ : Node(id, SockAddr(sa, salen)) {}
+
+ InfoHash getId() const {
+ return id;
+ }
+ const SockAddr& getAddr() const { return addr; }
+ std::string getAddrStr() const {
+ return addr.toString();
+ }
+ bool isClient() const { return is_client; }
+ bool isIncoming() { return time > reply_time; }
+
+ const time_point& getTime() const { return time; }
+ const time_point& getReplyTime() const { return reply_time; }
+ void setTime(const time_point& t) { time = t; }
+
+ /**
+ * Makes notice about an additional authentication error with this node. Up
+ * to MAX_AUTH_ERRORS errors are accepted in order to let the node recover.
+ * Upon this limit, the node expires.
+ */
+ void authError() {
+ if (++auth_errors > MAX_AUTH_ERRORS)
+ setExpired();
+ }
+ void authSuccess() { auth_errors = 0; }
+
+ bool isExpired() const { return expired_; }
+ bool isGood(time_point now) const;
+ bool isPendingMessage() const;
+ size_t getPendingMessageCount() const;
+
+ bool isOld(const time_point& now) const {
+ return time + NODE_EXPIRE_TIME < now;
+ }
+ bool isRemovable(const time_point& now) const {
+ return isExpired() and isOld(now);
+ }
+
+ NodeExport exportNode() const {
+ NodeExport ne;
+ ne.id = id;
+ ne.sslen = addr.getLength();
+ std::memcpy(&ne.ss, addr.get(), ne.sslen);
+ return ne;
+ }
+ sa_family_t getFamily() const { return addr.getFamily(); }
+
+ void update(const SockAddr&);
+
+ void requested(const Sp<net::Request>& req);
+ void received(time_point now, const Sp<net::Request>& req);
+ Sp<net::Request> getRequest(Tid tid);
+ void cancelRequest(const Sp<net::Request>& req);
+
+ void setExpired();
+
+ /**
+ * Opens a socket on which a node will be allowed to write further
+ * updates following the response to a previous request.
+ *
+ * @param cb The callback to execute once updates arrive on the socket.
+ *
+ * @return the socket id.
+ */
+ Tid openSocket(SocketCb&& cb);
+
+ Sp<Socket> getSocket(Tid id);
+
+ /**
+ * Closes a socket so that no further data will be read on that socket.
+ *
+ * @param id The socket to close.
+ */
+ void closeSocket(Tid id);
+
+ /**
+ * Resets the state of the node so it's not expired anymore.
+ */
+ void reset() { expired_ = false; reply_time = time_point::min(); }
+
+ /**
+ * Generates a new request id, skipping the invalid id (0).
+ *
+ * @return the new id.
+ */
+ Tid getNewTid() {
+ ++transaction_id;
+ return transaction_id ? ++transaction_id : transaction_id;
+ }
+
+ std::string toString() const;
+
+ OPENDHT_PUBLIC friend std::ostream& operator<< (std::ostream& s, const Node& h);
+
+ static constexpr const std::chrono::minutes NODE_GOOD_TIME {120};
+
+ /* The time after which we consider a node to be expirable. */
+ static constexpr const std::chrono::minutes NODE_EXPIRE_TIME {10};
+
+ /* Time for a request to timeout */
+ static constexpr const std::chrono::seconds MAX_RESPONSE_TIME {1};
+
+private:
+ /* Number of times we accept authentication errors from this node. */
+ static const constexpr unsigned MAX_AUTH_ERRORS {3};
+
+ SockAddr addr;
+ bool is_client {false};
+ time_point time {time_point::min()}; /* last time heard about */
+ time_point reply_time {time_point::min()}; /* time of last correct reply received */
+ unsigned auth_errors {0};
+ bool expired_ {false};
+ /* Last transaction id used; initialized to the reserved invalid id (0) so
+ * getNewTid() never reads an indeterminate value. */
+ Tid transaction_id {0};
+ using TransactionDist = std::uniform_int_distribution<decltype(transaction_id)>;
+
+ std::map<Tid, Sp<net::Request>> requests_ {};
+ std::map<Tid, Sp<Socket>> sockets_;
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "node.h"
+
+#include <list>
+#include <memory>
+
+namespace dht {
+
+/**
+ * Weak-reference cache of known nodes, split by address family (IPv4/IPv6).
+ */
+struct NodeCache {
+ Sp<Node> getNode(const InfoHash& id, sa_family_t family);
+ Sp<Node> getNode(const InfoHash& id, const SockAddr&, time_point now, bool confirmed, bool client=false);
+ std::vector<Sp<Node>> getCachedNodes(const InfoHash& id, sa_family_t sa_f, size_t count) const;
+
+ /**
+ * Reset the connectivity state of every node,
+ * Giving them a new chance if they were expired.
+ * To use in case of connectivity change etc.
+ *
+ * @param family The address family to clear; 0 (default) presumably
+ * means both families — TODO confirm in the implementation.
+ */
+ void clearBadNodes(sa_family_t family = 0);
+
+ ~NodeCache();
+
+private:
+ class NodeMap : private std::map<InfoHash, std::weak_ptr<Node>> {
+ public:
+ Sp<Node> getNode(const InfoHash& id);
+ Sp<Node> getNode(const InfoHash& id, const SockAddr&, time_point now, bool confirmed, bool client);
+ std::vector<Sp<Node>> getCachedNodes(const InfoHash& id, size_t count) const;
+ void clearBadNodes();
+ void setExpired();
+ void cleanup();
+ private:
+ size_t cleanup_counter {0}; // counts calls between periodic cleanups
+ };
+
+ // Select the per-family map; anything that is not AF_INET falls to the IPv6 map.
+ const NodeMap& cache(sa_family_t af) const { return af == AF_INET ? cache_4 : cache_6; }
+ NodeMap& cache(sa_family_t af) { return af == AF_INET ? cache_4 : cache_6; }
+ NodeMap cache_4;
+ NodeMap cache_6;
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2017-2018 Savoir-faire Linux Inc.
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include <chrono>
+
+namespace dht {
+namespace proxy {
+
+constexpr const std::chrono::seconds OP_TIMEOUT {1 * 60 * 60}; // one hour
+constexpr const std::chrono::seconds OP_MARGIN {5 * 60}; // 5 minutes
+constexpr const char* const HTTP_PROTO {"http://"};
+// Token identifying a proxy "listen" operation.
+using ListenToken = uint64_t;
+
+}
+}
--- /dev/null
+/*
+ * Copyright (C) 2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "utils.h"
+#include <queue>
+
+namespace dht {
+
+/**
+ * Sliding-window rate limiter: allows at most Quota events per Period
+ * seconds, tracked as a queue of event timestamps.
+ */
+template<size_t Quota, unsigned long Period=1>
+class RateLimiter {
+public:
+ /** Clear outdated records and return current quota usage */
+ size_t maintain(const time_point& now) {
+ auto limit = now - std::chrono::seconds(Period);
+ // Drop records older than the window; the queue is time-ordered,
+ // so we can stop at the first record inside the window.
+ while (not records.empty() and records.front() < limit)
+ records.pop();
+ return records.size();
+ }
+ /** Return false if quota is reached, insert record and return true otherwise. */
+ bool limit(const time_point& now) {
+ if (maintain(now) >= Quota)
+ return false;
+ records.emplace(now);
+ return true;
+ }
+ bool empty() const {
+ return records.empty();
+ }
+private:
+ std::queue<time_point> records {}; // timestamps of accepted events, oldest first
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <random>
+#include <algorithm>
+#include <functional>
+
+namespace dht {
+namespace crypto {
+
+#ifndef _MSC_VER
+#ifdef _WIN32
+
+/**
+ * Hardware random number generator using Intel RDRAND/RDSEED,
+ * API-compatible with std::random_device.
+ */
+class random_device {
+public:
+ using result_type = std::random_device::result_type;
+ // NOTE(review): a software PRNG is also declared below (member `gen`),
+ // presumably as a fallback when neither RDRAND nor RDSEED is available
+ // — confirm in the implementation file.
+ using pseudo_engine = std::mt19937_64;
+
+ /**
+ * Current implementation assumption : result_type must be of a size
+ * supported by Intel RDRAND/RDSEED.
+ * result_type is unsigned int so this is currently safe.
+ */
+ static_assert(
+ sizeof(result_type) == 2 ||
+ sizeof(result_type) == 4 ||
+ sizeof(result_type) == 8,
+ "result_type must be 16, 32 or 64 bits");
+
+ random_device();
+
+ result_type operator()();
+
+ static constexpr result_type min() {
+ return std::numeric_limits<result_type>::lowest();
+ }
+
+ static constexpr result_type max() {
+ return std::numeric_limits<result_type>::max();
+ }
+
+ // Mirrors std::random_device::entropy(): 1.0 when hardware
+ // randomness is available, 0.0 otherwise.
+ double entropy() const {
+ if (hasRdrand() or hasRdseed())
+ return 1.;
+ return 0.;
+ }
+
+ // CPU feature detection, computed once and cached (thread-safe since C++11
+ // function-local statics).
+ static bool hasRdrand() {
+ static const bool hasrdrand = _hasRdrand();
+ return hasrdrand;
+ }
+
+ static bool hasRdseed() {
+ static const bool hasrdseed = _hasRdseed();
+ return hasrdseed;
+ }
+
+private:
+ random_device& operator=(random_device&) = delete;
+
+ pseudo_engine gen;
+ std::uniform_int_distribution<result_type> dis {};
+
+ static bool hasIntelCpu();
+ static bool _hasRdrand();
+ static bool _hasRdseed();
+
+ // Raw CPUID register results for a given (function, subfunction) pair.
+ struct CPUIDinfo {
+ unsigned int EAX;
+ unsigned int EBX;
+ unsigned int ECX;
+ unsigned int EDX;
+ CPUIDinfo(const unsigned int func, const unsigned int subfunc);
+ };
+ bool rdrandStep(result_type* r);
+ bool rdrand(result_type* r);
+ bool rdseedStep(result_type* r);
+ bool rdseed(result_type* r);
+};
+
+#else
+
+using random_device = std::random_device;
+
+#endif
+#else
+using random_device = std::random_device;
+#endif
+
+/**
+ * Build a fully seeded random engine: draws T::state_size words from the
+ * (hardware-backed, if available) random_device and feeds them through a
+ * std::seed_seq, so the engine's whole state is seeded rather than just
+ * one word as with the single-value constructor.
+ */
+template<class T = std::mt19937, std::size_t N = T::state_size>
+auto getSeededRandomEngine () -> typename std::enable_if<!!N, T>::type {
+ typename T::result_type random_data[N];
+ random_device source;
+ std::generate(std::begin(random_data), std::end(random_data), std::ref(source));
+ std::seed_seq seeds(std::begin(random_data), std::end(random_data));
+ T seededEngine (seeds);
+ return seededEngine;
+}
+
+}} // dht::crypto
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "node.h"
+
+namespace dht {
+
+static constexpr unsigned TARGET_NODES {8};
+namespace net {
+class NetworkEngine;
+}
+
+/**
+ * One bucket of the routing table: the nodes whose ids fall in
+ * [first, next-bucket-first), plus one cached replacement candidate.
+ */
+struct Bucket {
+ Bucket() : cached() {}
+ Bucket(sa_family_t af, const InfoHash& f = {}, time_point t = time_point::min())
+ : af(af), first(f), time(t), cached() {}
+ sa_family_t af {0};
+ InfoHash first {}; // lower bound of this bucket's id range
+ time_point time {time_point::min()}; /* time of last reply in this bucket */
+ std::list<Sp<Node>> nodes {};
+ Sp<Node> cached; /* the address of a likely candidate */
+
+ /** Return a random node in a bucket. */
+ Sp<Node> randomNode();
+
+ void sendCachedPing(net::NetworkEngine& ne);
+ // Forget reply times so every node gets re-evaluated after a network change.
+ void connectivityChanged() {
+ time = time_point::min();
+ for (auto& node : nodes)
+ node->setTime(time_point::min());
+ }
+};
+
+class RoutingTable : public std::list<Bucket> {
+public:
+ using std::list<Bucket>::list;
+
+ time_point grow_time {time_point::min()};
+ bool is_client {false};
+
+ InfoHash middle(const RoutingTable::const_iterator&) const;
+
+ std::vector<Sp<Node>> findClosestNodes(const InfoHash id, time_point now, size_t count = TARGET_NODES) const;
+
+ RoutingTable::iterator findBucket(const InfoHash& id);
+ RoutingTable::const_iterator findBucket(const InfoHash& id) const;
+
+ /**
+ * Return true if the id is in the bucket's range.
+ */
+ inline bool contains(const RoutingTable::const_iterator& bucket, const InfoHash& id) const {
+ return InfoHash::cmp(bucket->first, id) <= 0
+ && (std::next(bucket) == end() || InfoHash::cmp(id, std::next(bucket)->first) < 0);
+ }
+
+ /**
+ * Return true if the table has no bucket or a single empty bucket.
+ */
+ inline bool isEmpty() const {
+ return empty() || (size() == 1 && front().nodes.empty());
+ }
+
+ // Propagate a connectivity change to every bucket and record when it happened.
+ void connectivityChanged(const time_point& now) {
+ grow_time = now;
+ for (auto& b : *this)
+ b.connectivityChanged();
+ }
+
+ /**
+ * Handle a newly seen node: insert or refresh it in the appropriate bucket.
+ *
+ * @param node The node to insert.
+ * @param confirm 1 if the node sent a message, 2 if it sent us a reply.
+ * @param now Current time reference.
+ * @param myid Our own node id.
+ * @param ne The network engine (used to ping cached candidates).
+ */
+ bool onNewNode(const Sp<Node>& node, int confirm, const time_point& now, const InfoHash& myid, net::NetworkEngine& ne);
+
+ /**
+ * Return a random id in the bucket's range.
+ */
+ InfoHash randomId(const RoutingTable::const_iterator& bucket) const;
+
+ unsigned depth(const RoutingTable::const_iterator& bucket) const;
+
+ /**
+ * Split a bucket in two equal parts.
+ */
+ bool split(const RoutingTable::iterator& b);
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+
+#pragma once
+
+#include "utils.h"
+#include "log_enable.h"
+
+#include <functional>
+#include <map>
+
+namespace dht {
+
+/*!
+ * @class Scheduler
+ * @brief Job scheduler
+ * @details
+ * Maintains the timings upon which to execute a job.
+ */
+class Scheduler {
+public:
+ struct Job {
+ Job(std::function<void()>&& f) : do_(std::move(f)) {}
+ std::function<void()> do_;
+ // Cancellation: run() skips jobs whose function has been cleared.
+ void cancel() { do_ = {}; }
+ };
+
+ /**
+ * Adds another job to the queue.
+ *
+ * @param t The time upon which the job shall be executed.
+ * @param job_func The job function to execute.
+ *
+ * @return pointer to the newly scheduled job.
+ */
+ Sp<Scheduler::Job> add(time_point t, std::function<void()>&& job_func) {
+ auto job = std::make_shared<Job>(std::move(job_func));
+ // time_point::max() means "never": the job is created but not queued.
+ if (t != time_point::max())
+ timers.emplace(std::move(t), job);
+ return job;
+ }
+
+ // Schedule an existing job at time t (no-op when t is time_point::max()).
+ void add(const Sp<Scheduler::Job>& job, time_point t) {
+ if (t != time_point::max())
+ timers.emplace(std::move(t), job);
+ }
+
+ /**
+ * Reschedules a job.
+ *
+ * @param job The job to edit.
+ * @param t The time at which the job shall be rescheduled.
+ */
+ void edit(Sp<Scheduler::Job>& job, time_point t) {
+ if (not job) {
+ return;
+ }
+ // std::function move doesn't guarantee to leave the object empty.
+ // Force clearing old value.
+ auto task = std::move(job->do_);
+ job->do_ = {};
+ job = add(t, std::move(task));
+ }
+
+ /**
+ * Runs the jobs to do up to now.
+ *
+ * @return The time for the next job to run.
+ */
+ time_point run() {
+ syncTime();
+ while (not timers.empty()) {
+ auto timer = timers.begin();
+ /*
+ * Running jobs scheduled before "now" prevents run+rescheduling
+ * loops before this method ends. It is guaranteed by the fact that a
+ * job will at least be scheduled for "now" and not before.
+ */
+ if (timer->first > now)
+ break;
+
+ auto job = std::move(timer->second);
+ timers.erase(timer);
+
+ // Skip cancelled jobs (empty function).
+ if (job->do_)
+ job->do_();
+ }
+ return getNextJobTime();
+ }
+
+ inline time_point getNextJobTime() const {
+ return timers.empty() ? time_point::max() : timers.begin()->first;
+ }
+
+ /**
+ * Accessors for the common time reference used for synchronizing
+ * operations.
+ */
+ inline const time_point& time() const { return now; }
+ inline time_point syncTime() { return (now = clock::now()); }
+
+private:
+ time_point now {clock::now()};
+ std::multimap<time_point, Sp<Job>> timers {}; /* the jobs ordered by time */
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Authors: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "dht.h"
+#include "crypto.h"
+
+#include <map>
+#include <vector>
+#include <memory>
+#include <random>
+
+namespace dht {
+
/**
 * DhtInterface decorator adding a cryptographic layer on top of a wrapped
 * DhtInterface (dht_): signed puts (putSigned), encrypted puts (putEncrypted),
 * transparent signature checking / decryption on get(), and certificate /
 * public key lookup with local caching.
 * All other operations are forwarded unchanged to the wrapped instance.
 */
class OPENDHT_PUBLIC SecureDht final : public DhtInterface {
public:

    typedef std::function<void(bool)> SignatureCheckCallback;

    using Config = SecureDhtConfig;

    // Returns the inner node configuration, deriving the node ID from the
    // identity certificate when no explicit node ID was provided.
    static dht::Config& getConfig(SecureDht::Config& conf)
    {
        auto& c = conf.node_config;
        if (not c.node_id and conf.id.second)
            c.node_id = InfoHash::get("node:"+conf.id.second->getId().toString());
        return c;
    }

    SecureDht() {}

    /**
     * s, s6: bound socket descriptors for IPv4 and IPv6, respectively.
     * For the Dht to be initialised, at least one of them must be >= 0.
     * id: the identity to use for the crypto layer and to compute
     * our own hash on the Dht.
     */
    SecureDht(std::unique_ptr<DhtInterface> dht, Config config);

    virtual ~SecureDht();

    // ID of our public key, or a default-constructed InfoHash if no identity key is set.
    InfoHash getId() const {
        return key_ ? key_->getPublicKey().getId() : InfoHash();
    }
    // Long ID of our public key, or a default-constructed PkId if no identity key is set.
    PkId getLongId() const {
        return key_ ? key_->getPublicKey().getLongId() : PkId();
    }

    // Wraps a value type so that its store/edit policies go through the crypto layer.
    ValueType secureType(ValueType&& type);

    ValueType secureType(const ValueType& type) {
        ValueType tmp_type = type;
        return secureType(std::move(tmp_type));
    }

    void registerType(const ValueType& type) override {
        if (dht_)
            dht_->registerType(secureType(type));
    }
    void registerType(ValueType&& type) {
        if (dht_)
            dht_->registerType(secureType(std::forward<ValueType>(type)));
    }
    // Registers a type as-is, without the crypto store/edit policies.
    void registerInsecureType(const ValueType& type) {
        if (dht_)
            dht_->registerType(type);
    }

    /**
     * "Secure" get(), that will check the signature of signed data, and decrypt encrypted data.
     * If the signature can't be checked, or if the data can't be decrypted, it is not returned.
     * Public, non-signed & non-encrypted data is retransmitted as-is.
     */
    void get(const InfoHash& id, GetCallback cb, DoneCallback donecb={}, Value::Filter&& = {}, Where&& w = {}) override;
    void get(const InfoHash& id, GetCallback cb, DoneCallbackSimple donecb={}, Value::Filter&& f = {}, Where&& w = {}) override {
        get(id, cb, bindDoneCb(donecb), std::forward<Value::Filter>(f), std::forward<Where>(w));
    }
    void get(const InfoHash& key, GetCallbackSimple cb, DoneCallback donecb={}, Value::Filter&& f={}, Where&& w = {}) override {
        get(key, bindGetCb(cb), donecb, std::forward<Value::Filter>(f), std::forward<Where>(w));
    }
    void get(const InfoHash& key, GetCallbackSimple cb, DoneCallbackSimple donecb, Value::Filter&& f={}, Where&& w = {}) override {
        get(key, bindGetCb(cb), bindDoneCb(donecb), std::forward<Value::Filter>(f), std::forward<Where>(w));
    }

    /**
     * Will take ownership of the value, sign it using our private key and put it in the DHT.
     */
    void putSigned(const InfoHash& hash, Sp<Value> val, DoneCallback callback, bool permanent = false);
    void putSigned(const InfoHash& hash, Value&& v, DoneCallback callback, bool permanent = false) {
        putSigned(hash, std::make_shared<Value>(std::move(v)), callback, permanent);
    }

    /**
     * Will sign the data using our private key, encrypt it using the recipient's public key,
     * and put it in the DHT.
     * The operation will be immediate if the recipient's public key is known (otherwise it will be retrieved first).
     */
    void putEncrypted(const InfoHash& hash, const InfoHash& to, Sp<Value> val, DoneCallback callback, bool permanent = false);
    void putEncrypted(const InfoHash& hash, const InfoHash& to, Value&& v, DoneCallback callback, bool permanent = false) {
        putEncrypted(hash, to, std::make_shared<Value>(std::move(v)), callback, permanent);
    }

    /**
     * Take ownership of the value and sign it using our private key.
     */
    void sign(Value& v) const;

    // Signs v with our key and returns it encrypted for `to`.
    Value encrypt(Value& v, const crypto::PublicKey& to) const;

    // Decrypts an encrypted value using our private key.
    Value decrypt(const Value& v);

    // Looks up a certificate, first in local caches, then on the DHT.
    void findCertificate(const InfoHash& node, std::function<void(const Sp<crypto::Certificate>)> cb);
    void findPublicKey(const InfoHash& node, std::function<void(const Sp<const crypto::PublicKey>)> cb);

    const Sp<crypto::Certificate> registerCertificate(const InfoHash& node, const Blob& cert);
    void registerCertificate(Sp<crypto::Certificate>& cert);

    const Sp<crypto::Certificate> getCertificate(const InfoHash& node) const;
    const Sp<const crypto::PublicKey> getPublicKey(const InfoHash& node) const;

    /**
     * Allows to set a custom callback called by the library to find a locally-stored certificate.
     * The search key used is the public key ID, so there may be multiple certificates returned, signed with
     * the same private key.
     */
    void setLocalCertificateStore(CertificateStoreQuery&& query_method) {
        localQueryMethod_ = std::move(query_method);
    }

    /**
     * SecureDht to Dht proxy
     * (methods below simply forward to the wrapped DhtInterface)
     */
    void shutdown(ShutdownCallback cb) override {
        dht_->shutdown(cb);
    }
    void dumpTables() const override {
        dht_->dumpTables();
    }
    inline const InfoHash& getNodeId() const override { return dht_->getNodeId(); }

    std::pair<size_t, size_t> getStoreSize() const override {
        return dht_->getStoreSize();
    }
    std::string getStorageLog() const override {
        return dht_->getStorageLog();
    }
    std::string getStorageLog(const InfoHash& h) const override {
        return dht_->getStorageLog(h);
    }
    void setStorageLimit(size_t limit = DEFAULT_STORAGE_LIMIT) override {
        dht_->setStorageLimit(limit);
    }
    std::vector<NodeExport> exportNodes() override {
        return dht_->exportNodes();
    }
    std::vector<ValuesExport> exportValues() const override {
        return dht_->exportValues();
    }
    void importValues(const std::vector<ValuesExport>& v) override {
        dht_->importValues(v);
    }
    NodeStats getNodesStats(sa_family_t af) const override {
        return dht_->getNodesStats(af);
    }
    std::vector<unsigned> getNodeMessageStats(bool in = false) override {
        return dht_->getNodeMessageStats(in);
    }
    std::string getRoutingTablesLog(sa_family_t af) const override {
        return dht_->getRoutingTablesLog(af);
    }
    std::string getSearchesLog(sa_family_t af) const override {
        return dht_->getSearchesLog(af);
    }
    std::string getSearchLog(const InfoHash& h, sa_family_t af = AF_UNSPEC) const override {
        return dht_->getSearchLog(h, af);
    }
    std::vector<SockAddr> getPublicAddress(sa_family_t family = 0) override {
        return dht_->getPublicAddress(family);
    }
    time_point periodic(const uint8_t *buf, size_t buflen, const SockAddr& sa) override {
        return dht_->periodic(buf, buflen, sa);
    }
    time_point periodic(const uint8_t *buf, size_t buflen, const sockaddr* from, socklen_t fromlen) override {
        return dht_->periodic(buf, buflen, from, fromlen);
    }
    NodeStatus getStatus(sa_family_t af) const override {
        return dht_->getStatus(af);
    }
    NodeStatus getStatus() const override {
        return dht_->getStatus();
    }
    bool isRunning(sa_family_t af = 0) const override {
        return dht_->isRunning(af);
    }
    const ValueType& getType(ValueType::Id type_id) const override {
        return dht_->getType(type_id);
    }
    void insertNode(const InfoHash& id, const SockAddr& sa) override {
        dht_->insertNode(id, sa);
    }
    void insertNode(const InfoHash& id, const sockaddr* sa, socklen_t salen) override {
        dht_->insertNode(id, sa, salen);
    }
    void insertNode(const NodeExport& n) override {
        dht_->insertNode(n);
    }
    void pingNode(const sockaddr* sa, socklen_t salen, DoneCallbackSimple&& cb={}) override {
        dht_->pingNode(sa, salen, std::move(cb));
    }
    void query(const InfoHash& key, QueryCallback cb, DoneCallback done_cb = {}, Query&& q = {}) override {
        dht_->query(key, cb, done_cb, std::move(q));
    }
    void query(const InfoHash& key, QueryCallback cb, DoneCallbackSimple done_cb = {}, Query&& q = {}) override {
        dht_->query(key, cb, done_cb, std::move(q));
    }
    std::vector<Sp<Value>> getLocal(const InfoHash& key, Value::Filter f = Value::AllFilter()) const override {
        return dht_->getLocal(key, f);
    }
    Sp<Value> getLocalById(const InfoHash& key, Value::Id vid) const override {
        return dht_->getLocalById(key, vid);
    }
    void put(const InfoHash& key,
            Sp<Value> v,
            DoneCallback cb=nullptr,
            time_point created=time_point::max(),
            bool permanent = false) override
    {
        dht_->put(key, v, cb, created, permanent);
    }
    void put(const InfoHash& key,
            const Sp<Value>& v,
            DoneCallbackSimple cb,
            time_point created=time_point::max(),
            bool permanent = false) override
    {
        dht_->put(key, v, cb, created, permanent);
    }

    void put(const InfoHash& key,
            Value&& v,
            DoneCallback cb=nullptr,
            time_point created=time_point::max(),
            bool permanent = false) override
    {
        dht_->put(key, std::move(v), cb, created, permanent);
    }
    void put(const InfoHash& key,
            Value&& v,
            DoneCallbackSimple cb,
            time_point created=time_point::max(),
            bool permanent = false) override
    {
        dht_->put(key, std::move(v), cb, created, permanent);
    }
    std::vector<Sp<Value>> getPut(const InfoHash& h) override {
        return dht_->getPut(h);
    }
    Sp<Value> getPut(const InfoHash& h, const Value::Id& vid) override {
        return dht_->getPut(h, vid);
    }
    bool cancelPut(const InfoHash& h, const Value::Id& vid) override {
        return dht_->cancelPut(h, vid);
    }

    // Listen variants go through the crypto layer (signature check / decryption).
    size_t listen(const InfoHash& key, ValueCallback, Value::Filter={}, Where={}) override;
    size_t listen(const InfoHash& key, GetCallback cb, Value::Filter = {}, Where w = {}) override;
    size_t listen(const InfoHash& key, GetCallbackSimple cb, Value::Filter f={}, Where w = {}) override {
        return listen(key, bindGetCb(cb), f, w);
    }
    bool cancelListen(const InfoHash& h, size_t token) override {
        return dht_->cancelListen(h, token);
    }
    void connectivityChanged(sa_family_t af) override {
        dht_->connectivityChanged(af);
    }
    void connectivityChanged() override {
        dht_->connectivityChanged();
    }

    // If enabled, values failing signature check / decryption are forwarded anyway.
    void forwardAllMessages(bool forward) {
        forward_all_ = forward;
    }

    void setPushNotificationToken(const std::string& token = "") override {
        dht_->setPushNotificationToken(token);
    }

    /**
     * Call linked callback with push_notification
     * @param notification to process
     */
    void pushNotificationReceived(const std::map<std::string, std::string>& notification) override {
        dht_->pushNotificationReceived(notification);
    }

    void setLoggers(LogMethod error = NOLOG, LogMethod warn = NOLOG, LogMethod debug = NOLOG) override
    {
        DhtInterface::setLoggers(error, warn, debug);
        dht_->setLoggers(error, warn, debug);
    }

    /**
     * Only print logs related to the given InfoHash (if given), or disable filter (if zeroes).
     */
    void setLogFilter(const InfoHash& f) override {
        DHT_LOG.setFilter(f);
        dht_->setLogFilter(f);
    }

private:
    // The wrapped DHT implementation all operations are forwarded to.
    std::unique_ptr<DhtInterface> dht_;
    // prevent copy
    SecureDht(const SecureDht&) = delete;
    SecureDht& operator=(const SecureDht&) = delete;

    // Verifies/decrypts an incoming value; returns nullptr when the check fails.
    Sp<Value> checkValue(const Sp<Value>& v);
    ValueCallback getCallbackFilter(ValueCallback, Value::Filter&&);
    GetCallback getCallbackFilter(GetCallback, Value::Filter&&);

    // Identity key pair used to sign and decrypt values.
    Sp<crypto::PrivateKey> key_ {};
    Sp<crypto::Certificate> certificate_ {};

    // method to query the local certificate store
    CertificateStoreQuery localQueryMethod_ {};

    // our certificate cache
    std::map<InfoHash, Sp<crypto::Certificate>> nodesCertificates_ {};
    std::map<InfoHash, Sp<const crypto::PublicKey>> nodesPubKeys_ {};

    std::atomic_bool forward_all_ {false};
};
+
+const ValueType CERTIFICATE_TYPE = {
+ 8, "Certificate", std::chrono::hours(24 * 7),
+ // A certificate can only be stored at its public key ID.
+ [](InfoHash id, Sp<Value>& v, const InfoHash&, const SockAddr&) {
+ try {
+ crypto::Certificate crt(v->data);
+ // TODO check certificate signature
+ return crt.getPublicKey().getId() == id;
+ } catch (const std::exception& e) {}
+ return false;
+ },
+ [](InfoHash, const Sp<Value>& o, Sp<Value>& n, const InfoHash&, const SockAddr&) {
+ try {
+ return crypto::Certificate(o->data).getPublicKey().getId() == crypto::Certificate(n->data).getPublicKey().getId();
+ } catch (const std::exception& e) {}
+ return false;
+ }
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2016 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "def.h"
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifdef __ANDROID__
+typedef uint16_t in_port_t;
+#endif
+#else
+#include <iso646.h>
+#include <stdint.h>
+#include <winsock2.h>
+#include <ws2def.h>
+#include <ws2tcpip.h>
+typedef uint16_t sa_family_t;
+typedef uint16_t in_port_t;
+#endif
+
+#include <string>
+#include <memory>
+#include <vector>
+#include <stdlib.h>
+
+#include <cstring>
+#include <cstddef>
+
+namespace dht {
+
+OPENDHT_PUBLIC std::string print_addr(const sockaddr* sa, socklen_t slen);
+OPENDHT_PUBLIC std::string print_addr(const sockaddr_storage& ss, socklen_t sslen);
+
+/**
+ * A Socket Address (sockaddr*), with abstraction for IPv4, IPv6 address families.
+ */
class OPENDHT_PUBLIC SockAddr {
public:
    SockAddr() {}
    // Deep-copies the other address' storage.
    SockAddr(const SockAddr& o) {
        set(o.get(), o.getLength());
    }
    // Takes ownership of the other address' storage, leaving it empty.
    SockAddr(SockAddr&& o) : len(o.len), addr(std::move(o.addr)) {
        o.len = 0;
    }

    /**
     * Build from existing address.
     */
    SockAddr(const sockaddr* sa, socklen_t length) {
        if (length > sizeof(sockaddr_storage))
            throw std::runtime_error("Socket address length is too large");
        set(sa, length);
    }
    // Build from an address whose length is deduced from its family.
    // Only AF_INET and AF_INET6 are supported; other families throw.
    SockAddr(const sockaddr* sa) {
        socklen_t len = 0;
        if (sa) {
            if (sa->sa_family == AF_INET)
                len = sizeof(sockaddr_in);
            else if(sa->sa_family == AF_INET6)
                len = sizeof(sockaddr_in6);
            else
                throw std::runtime_error("Unknown address family");
        }
        set(sa, len);
    }

    /**
     * Build from an existing sockaddr_storage structure.
     */
    SockAddr(const sockaddr_storage& ss, socklen_t len) : SockAddr((const sockaddr*)&ss, len) {}

    static std::vector<SockAddr> resolve(const std::string& host, const std::string& service = {});

    // Total order: first by length, then by raw byte comparison of the storage.
    bool operator<(const SockAddr& o) const {
        if (len != o.len)
            return len < o.len;
        return std::memcmp((const uint8_t*)get(), (const uint8_t*)o.get(), len) < 0;
    }

    // Byte-wise equality of the whole stored structure (family, port and address).
    bool equals(const SockAddr& o) const {
        return len == o.len
            && std::memcmp((const uint8_t*)get(), (const uint8_t*)o.get(), len) == 0;
    }
    SockAddr& operator=(const SockAddr& o) {
        set(o.get(), o.getLength());
        return *this;
    }
    SockAddr& operator=(SockAddr&& o) {
        len = o.len;
        o.len = 0;
        addr = std::move(o.addr);
        return *this;
    }

    std::string toString() const {
        return print_addr(get(), getLength());
    }

    /**
     * Returns the address family or AF_UNSPEC if the address is not set.
     */
    sa_family_t getFamily() const { return len > sizeof(sa_family_t) ? addr->sa_family : AF_UNSPEC; }

    /**
     * Resize the managed structure to the appropriate size (if needed),
     * in which case the sockaddr structure is cleared to zero,
     * and set the address family field (sa_family).
     */
    void setFamily(sa_family_t af) {
        socklen_t new_length;
        switch(af) {
        case AF_INET:
            new_length = sizeof(sockaddr_in);
            break;
        case AF_INET6:
            new_length = sizeof(sockaddr_in6);
            break;
        default:
            new_length = 0;
        }
        if (new_length != len) {
            len = new_length;
            // calloc zeroes the new structure before the family is written.
            if (len) addr.reset((sockaddr*)::calloc(len, 1));
            else addr.reset();
        }
        if (len > sizeof(sa_family_t))
            addr->sa_family = af;
    }

    /**
     * Retrieve the port (in host byte order) or 0 if the address is not
     * of a supported family.
     */
    in_port_t getPort() const {
        switch(getFamily()) {
        case AF_INET:
            return ntohs(getIPv4().sin_port);
        case AF_INET6:
            return ntohs(getIPv6().sin6_port);
        default:
            return 0;
        }
    }
    /**
     * Set the port. The address must be of a supported family.
     * @param p The port in host byte order.
     */
    void setPort(in_port_t p) {
        switch(getFamily()) {
        case AF_INET:
            getIPv4().sin_port = htons(p);
            break;
        case AF_INET6:
            getIPv6().sin6_port = htons(p);
            break;
        }
    }

    /**
     * Returns the accessible byte length at the pointer returned by #get().
     * If zero, #get() returns null.
     */
    socklen_t getLength() const { return len; }

    /**
     * An address is defined to be true if its length is not zero.
     */
    explicit operator bool() const noexcept {
        return len;
    }

    /**
     * Returns the address to the managed sockaddr structure.
     * The accessible length is returned by #getLength().
     */
    const sockaddr* get() const { return addr.get(); }

    /**
     * Returns the address to the managed sockaddr structure.
     * The accessible length is returned by #getLength().
     */
    sockaddr* get() { return addr.get(); }

    // Typed views of the storage; valid only when the corresponding
    // family is set (callers are expected to check getFamily() first).
    const sockaddr_in& getIPv4() const {
        return *reinterpret_cast<const sockaddr_in*>(get());
    }
    const sockaddr_in6& getIPv6() const {
        return *reinterpret_cast<const sockaddr_in6*>(get());
    }
    sockaddr_in& getIPv4() {
        return *reinterpret_cast<sockaddr_in*>(get());
    }
    sockaddr_in6& getIPv6() {
        return *reinterpret_cast<sockaddr_in6*>(get());
    }

    /**
     * Return true if address is a loopback IP address.
     */
    bool isLoopback() const;

    /**
     * Return true if address is not a public IP address.
     */
    bool isPrivate() const;

    bool isUnspecified() const;

    bool isMappedIPv4() const;
    SockAddr getMappedIPv4() const;

    /**
     * A comparator to classify IP addresses, only considering the
     * first 64 bits in IPv6.
     */
    struct ipCmp {
        bool operator()(const SockAddr& a, const SockAddr& b) const {
            if (a.len != b.len)
                return a.len < b.len;
            socklen_t start, len;
            switch(a.getFamily()) {
            case AF_INET:
                start = offsetof(sockaddr_in, sin_addr);
                len = sizeof(in_addr);
                break;
            case AF_INET6:
                start = offsetof(sockaddr_in6, sin6_addr);
                // don't consider more than 64 bits (IPv6)
                len = 8;
                break;
            default:
                start = 0;
                len = a.len;
                break;
            }
            return std::memcmp((uint8_t*)a.get()+start,
                               (uint8_t*)b.get()+start, len) < 0;
        }
    };
private:
    socklen_t len {0};
    // The sockaddr buffer is malloc'd, so the deleter must use ::free.
    struct free_delete { void operator()(void* p) { ::free(p); } };
    std::unique_ptr<sockaddr, free_delete> addr {};

    // Replaces the stored address with a copy of sa (length bytes).
    // Reallocates only when the length changes; length == 0 clears the address.
    // NOTE(review): assumes sa is non-null whenever length > 0 — callers guarantee this.
    void set(const sockaddr* sa, socklen_t length) {
        if (len != length) {
            len = length;
            if (len) addr.reset((sockaddr*)::malloc(len));
            else addr.reset();
        }
        if (len)
            std::memcpy((uint8_t*)get(), (const uint8_t*)sa, len);
    }

};
+
+OPENDHT_PUBLIC bool operator==(const SockAddr& a, const SockAddr& b);
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "def.h"
+
+#include <msgpack.hpp>
+
+#include <chrono>
+#include <random>
+#include <functional>
+#include <map>
+
+#include <cstdarg>
+
+#define WANT4 1
+#define WANT6 2
+
+/**
+ * OpenDHT C++ namespace
+ */
+namespace dht {
+
+using NetId = uint32_t;
+using want_t = int_fast8_t;
+
+// shortcut for std::shared_ptr
+template<class T>
+using Sp = std::shared_ptr<T>;
+
/**
 * Removes from `map` every entry for which `condition` returns true.
 * The condition is invoked with a reference to each key/value pair.
 */
template <typename Key, typename Item, typename Condition>
void erase_if(std::map<Key, Item>& map, const Condition& condition)
{
    auto entry = map.begin();
    while (entry != map.end()) {
        if (condition(*entry))
            entry = map.erase(entry);
        else
            ++entry;
    }
}
+
+/**
+ * Split "[host]:port" or "host:port" to pair<"host", "port">.
+ */
+OPENDHT_PUBLIC std::pair<std::string, std::string>
+splitPort(const std::string& s);
+
+class OPENDHT_PUBLIC DhtException : public std::runtime_error {
+public:
+ DhtException(const std::string &str = "") :
+ std::runtime_error("DhtException occurred: " + str) {}
+};
+
+class OPENDHT_PUBLIC SocketException : public DhtException {
+public:
+ SocketException(int err) :
+ DhtException(strerror(err)) {}
+};
+
+// Time related definitions and utility functions
+
+using clock = std::chrono::steady_clock;
+using time_point = clock::time_point;
+using duration = clock::duration;
+
+time_point from_time_t(std::time_t t);
+std::time_t to_time_t(time_point t);
+
/**
 * Converts a std::chrono::duration to a floating-point number of seconds.
 */
template <class DT>
static double
print_dt(DT d) {
    using fsec = std::chrono::duration<double>;
    return std::chrono::duration_cast<fsec>(d).count();
}
+
+template <typename Duration = duration>
+class uniform_duration_distribution : public std::uniform_int_distribution<typename Duration::rep> {
+ using Base = std::uniform_int_distribution<typename Duration::rep>;
+ using param_type = typename Base::param_type;
+public:
+ uniform_duration_distribution(Duration min, Duration max) : Base(min.count(), max.count()) {}
+ template <class Generator>
+ Duration operator()(Generator && g) {
+ return Duration(Base::operator()(g));
+ }
+ template< class Generator >
+ Duration operator()( Generator && g, const param_type& params ) {
+ return Duration(Base::operator()(g, params));
+ }
+};
+
+// Serialization related definitions and utility functions
+
+/**
+ * Arbitrary binary data.
+ */
+using Blob = std::vector<uint8_t>;
+
+/**
+ * Provides backward compatibility with msgpack 1.0
+ */
+OPENDHT_PUBLIC Blob unpackBlob(msgpack::object& o);
+
+template <typename Type>
+Blob
+packMsg(const Type& t) {
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack(t);
+ return {buffer.data(), buffer.data()+buffer.size()};
+}
+
+template <typename Type>
+Type
+unpackMsg(Blob b) {
+ msgpack::unpacked msg_res = msgpack::unpack((const char*)b.data(), b.size());
+ return msg_res.get().as<Type>();
+}
+
+msgpack::unpacked unpackMsg(Blob b);
+
+msgpack::object* findMapValue(msgpack::object& map, const std::string& key);
+
+} // namespace dht
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h"
+#include "crypto.h"
+#include "utils.h"
+#include "sockaddr.h"
+
+#include <msgpack.hpp>
+
+#include <string>
+#include <sstream>
+#include <bitset>
+#include <vector>
+#include <iostream>
+#include <algorithm>
+#include <functional>
+#include <memory>
+#include <chrono>
+#include <set>
+
+#ifdef OPENDHT_JSONCPP
+#include <json/json.h>
+#endif
+
+namespace dht {
+
+struct Value;
+struct Query;
+
/**
 * A storage policy is applied once to every incoming value storage request.
 * If the policy returns false, the value is dropped.
 *
 * @param key: the key where the storage is requested.
 * @param value: the value to be stored. The value can be edited by the storage policy.
 * @param from: id of the requesting node.
 * @param addr: network address of the incoming request.
 */
using StorePolicy = std::function<bool(InfoHash key, std::shared_ptr<Value>& value, const InfoHash& from, const SockAddr& addr)>;

/**
 * An edition policy is applied once to every incoming value storage request,
 * if a value already exists for this key and value id.
 * If the policy returns false, the edition request is ignored.
 * The default behavior is to deny edition (see {ValueType::DEFAULT_EDIT_POLICY}).
 * Some {ValueType}s may override this behavior (e.g. SignedValue).
 *
 * @param key: the key where the value is stored.
 * @param old_val: the previously stored value.
 * @param new_val: the new value to be stored. The value can be edited by the edit policy.
 * @param from: id of the requesting node.
 * @param addr: network address of the incoming request.
 */
using EditPolicy = std::function<bool(InfoHash key, const std::shared_ptr<Value>& old_val, std::shared_ptr<Value>& new_val, const InfoHash& from, const SockAddr& addr)>;

/* Maximum accepted size of a single stored value, in bytes. */
static constexpr const size_t MAX_VALUE_SIZE {1024 * 64};
+
+struct OPENDHT_PUBLIC ValueType {
+ typedef uint16_t Id;
+
+ static bool DEFAULT_STORE_POLICY(InfoHash, std::shared_ptr<Value>& v, const InfoHash&, const SockAddr&);
+ static bool DEFAULT_EDIT_POLICY(InfoHash, const std::shared_ptr<Value>&, std::shared_ptr<Value>&, const InfoHash&, const SockAddr&) {
+ return false;
+ }
+
+ ValueType () {}
+
+ ValueType (Id id, std::string name, duration e = std::chrono::minutes(10))
+ : id(id), name(name), expiration(e) {}
+
+ ValueType (Id id, std::string name, duration e, StorePolicy sp, EditPolicy ep = DEFAULT_EDIT_POLICY)
+ : id(id), name(name), expiration(e), storePolicy(sp), editPolicy(ep) {}
+
+ virtual ~ValueType() {}
+
+ bool operator==(const ValueType& o) {
+ return id == o.id;
+ }
+
+ // Generic value type
+ static const ValueType USER_DATA;
+
+
+ Id id {0};
+ std::string name {};
+ duration expiration {60 * 10};
+ StorePolicy storePolicy {DEFAULT_STORE_POLICY};
+ EditPolicy editPolicy {DEFAULT_EDIT_POLICY};
+};
+
+class TypeStore {
+public:
+ void registerType(const ValueType& type) {
+ types[type.id] = type;
+ }
+ const ValueType& getType(ValueType::Id type_id) const {
+ const auto& t_it = types.find(type_id);
+ return (t_it == types.end()) ? ValueType::USER_DATA : t_it->second;
+ }
+private:
+ std::map<ValueType::Id, ValueType> types {};
+};
+
+struct CryptoValueCache;
+
+/**
+ * A "value" is data potentially stored on the Dht, with some metadata.
+ *
+ * It can be an IP:port announced for a service, a public key, or any kind of
+ * light user-defined data (recommended: less than 512 bytes).
+ *
+ * Values are stored at a given InfoHash in the Dht, but also have a
+ * unique ID to distinguish between values stored at the same location.
+ */
+struct OPENDHT_PUBLIC Value
+{
+ enum class Field : int {
+ None = 0,
+ Id, /* Value::id */
+ ValueType, /* Value::type */
+ OwnerPk, /* Value::owner */
+ SeqNum, /* Value::seq */
+ UserType, /* Value::user_type */
+
+ COUNT /* the total number of fields */
+ };
+
+ typedef uint64_t Id;
+ static const constexpr Id INVALID_ID {0};
+
+ class Filter : public std::function<bool(const Value&)> {
+ public:
+ Filter() {}
+
+ template<typename Functor>
+ Filter(Functor f) : std::function<bool(const Value&)>::function(f) {}
+
+ Filter chain(Filter&& f2) {
+ auto f1 = *this;
+ return chain(std::move(f1), std::move(f2));
+ }
+ Filter chainOr(Filter&& f2) {
+ auto f1 = *this;
+ return chainOr(std::move(f1), std::move(f2));
+ }
+ static Filter chain(Filter&& f1, Filter&& f2) {
+ if (not f1) return f2;
+ if (not f2) return f1;
+ return [f1,f2](const Value& v) {
+ return f1(v) and f2(v);
+ };
+ }
+ static Filter chainAll(std::vector<Filter>&& set) {
+ if (set.empty()) return {};
+ return std::bind([](const Value& v, std::vector<Filter>& s) {
+ for (const auto& f : s)
+ if (f and not f(v))
+ return false;
+ return true;
+ }, std::placeholders::_1, std::move(set));
+ }
+ static Filter chain(std::initializer_list<Filter> l) {
+ return chainAll(std::vector<Filter>(l.begin(), l.end()));
+ }
+ static Filter chainOr(Filter&& f1, Filter&& f2) {
+ if (not f1 or not f2) return AllFilter();
+ return [f1,f2](const Value& v) {
+ return f1(v) or f2(v);
+ };
+ }
+ std::vector<Sp<Value>> filter(const std::vector<Sp<Value>>& values) {
+ if (not (*this))
+ return values;
+ std::vector<Sp<Value>> ret;
+ for (const auto& v : values)
+ if ((*this)(v))
+ ret.emplace_back(v);
+ return ret;
+ }
+ };
+
+ /* Sneaky functions disguised in classes */
+
+ static const Filter AllFilter() {
+ return {};
+ }
+
+ static Filter TypeFilter(const ValueType& t) {
+ const auto tid = t.id;
+ return [tid](const Value& v) {
+ return v.type == tid;
+ };
+ }
+ static Filter TypeFilter(const ValueType::Id& tid) {
+ return [tid](const Value& v) {
+ return v.type == tid;
+ };
+ }
+
+ static Filter IdFilter(const Id id) {
+ return [id](const Value& v) {
+ return v.id == id;
+ };
+ }
+
+ static Filter RecipientFilter(const InfoHash& r) {
+ return [r](const Value& v) {
+ return v.recipient == r;
+ };
+ }
+
+ static Filter OwnerFilter(const crypto::PublicKey& pk) {
+ return OwnerFilter(pk.getId());
+ }
+
+ static Filter OwnerFilter(const InfoHash& pkh) {
+ return [pkh](const Value& v) {
+ return v.owner and v.owner->getId() == pkh;
+ };
+ }
+
+ static Filter SeqNumFilter(uint16_t seq_no) {
+ return [seq_no](const Value& v) {
+ return v.seq == seq_no;
+ };
+ }
+
+ static Filter UserTypeFilter(const std::string& ut) {
+ return [ut](const Value& v) {
+ return v.user_type == ut;
+ };
+ }
+
+ class SerializableBase
+ {
+ public:
+ SerializableBase() {}
+ virtual ~SerializableBase() {};
+ virtual const ValueType& getType() const = 0;
+ virtual void unpackValue(const Value& v) = 0;
+ virtual Value packValue() const = 0;
+ };
+
+ template <typename Derived, typename Base=SerializableBase>
+ class Serializable : public Base
+ {
+ public:
+ using Base::Base;
+
+ virtual const ValueType& getType() const {
+ return Derived::TYPE;
+ }
+
+ virtual void unpackValue(const Value& v) {
+ auto msg = msgpack::unpack((const char*)v.data.data(), v.data.size());
+ msg.get().convert(*static_cast<Derived*>(this));
+ }
+
+ virtual Value packValue() const {
+ return Value {getType(), static_cast<const Derived&>(*this)};
+ }
+ };
+
+ template <typename T,
+ typename std::enable_if<std::is_base_of<SerializableBase, T>::value, T>::type* = nullptr>
+ static Value pack(const T& obj)
+ {
+ return obj.packValue();
+ }
+
+ template <typename T,
+ typename std::enable_if<!std::is_base_of<SerializableBase, T>::value, T>::type* = nullptr>
+ static Value pack(const T& obj)
+ {
+ return {ValueType::USER_DATA.id, packMsg<T>(obj)};
+ }
+
+ template <typename T,
+ typename std::enable_if<std::is_base_of<SerializableBase, T>::value, T>::type* = nullptr>
+ static T unpack(const Value& v)
+ {
+ T msg;
+ msg.unpackValue(v);
+ return msg;
+ }
+
+ template <typename T,
+ typename std::enable_if<!std::is_base_of<SerializableBase, T>::value, T>::type* = nullptr>
+ static T unpack(const Value& v)
+ {
+ return unpackMsg<T>(v.data);
+ }
+
+ template <typename T>
+ T unpack()
+ {
+ return unpack<T>(*this);
+ }
+
+ bool isEncrypted() const {
+ return not cypher.empty();
+ }
+ bool isSigned() const {
+ return owner and not signature.empty();
+ }
+
+ /**
+ * Sign the value using the provided private key.
+ * Afterward, checkSignature() will return true and owner will
+ * be set to the corresponding public key.
+ */
+ void sign(const crypto::PrivateKey& key) {
+ if (isEncrypted())
+ throw DhtException("Can't sign encrypted data.");
+ owner = std::make_shared<const crypto::PublicKey>(key.getPublicKey());
+ signature = key.sign(getToSign());
+ }
+
+ /**
+ * Check that the value is signed and that the signature matches.
+ * If true, the owner field will contain the signer public key.
+ */
+ bool checkSignature() const {
+ return isSigned() and owner->checkSignature(getToSign(), signature);
+ }
+
+ std::shared_ptr<const crypto::PublicKey> getOwner() const {
+ return std::static_pointer_cast<const crypto::PublicKey>(owner);
+ }
+
+ /**
+ * Sign the value with from and returns the encrypted version for to.
+ */
+ Value encrypt(const crypto::PrivateKey& from, const crypto::PublicKey& to) {
+ if (isEncrypted())
+ throw DhtException("Data is already encrypted.");
+ setRecipient(to.getId());
+ sign(from);
+ Value nv {id};
+ nv.setCypher(to.encrypt(getToEncrypt()));
+ return nv;
+ }
+
+ Value() {}
+
+ Value (Id id) : id(id) {}
+
+ /** Generic constructor */
+ Value(ValueType::Id t, const Blob& data, Id id = INVALID_ID)
+ : id(id), type(t), data(data) {}
+ Value(ValueType::Id t, Blob&& data, Id id = INVALID_ID)
+ : id(id), type(t), data(std::move(data)) {}
+ Value(ValueType::Id t, const uint8_t* dat_ptr, size_t dat_len, Id id = INVALID_ID)
+ : id(id), type(t), data(dat_ptr, dat_ptr+dat_len) {}
+
+#ifdef OPENDHT_JSONCPP
+ /**
+ * Build a value from a json object
+ * @param json
+ */
+ Value(Json::Value& json);
+#endif
+
+ template <typename Type>
+ Value(ValueType::Id t, const Type& d, Id id = INVALID_ID)
+ : id(id), type(t), data(packMsg(d)) {}
+
+ template <typename Type>
+ Value(const ValueType& t, const Type& d, Id id = INVALID_ID)
+ : id(id), type(t.id), data(packMsg(d)) {}
+
+ /** Custom user data constructor */
+ Value(const Blob& userdata) : data(userdata) {}
+ Value(Blob&& userdata) : data(std::move(userdata)) {}
+ Value(const uint8_t* dat_ptr, size_t dat_len) : data(dat_ptr, dat_ptr+dat_len) {}
+
+ Value(Value&& o) noexcept
+ : id(o.id), owner(std::move(o.owner)), recipient(o.recipient),
+ type(o.type), data(std::move(o.data)), user_type(std::move(o.user_type)), seq(o.seq), signature(std::move(o.signature)), cypher(std::move(o.cypher)) {}
+
+ template <typename Type>
+ Value(const Type& vs)
+ : Value(pack<Type>(vs)) {}
+
+ /**
+ * Unpack a serialized value
+ */
+ Value(const msgpack::object& o) {
+ msgpack_unpack(o);
+ }
+
+ inline bool operator== (const Value& o) {
+ return id == o.id &&
+ (isEncrypted() ? cypher == o.cypher :
+ ((owner == o.owner || *owner == *o.owner) && type == o.type && data == o.data && user_type == o.user_type && signature == o.signature));
+ }
+
+ void setRecipient(const InfoHash& r) {
+ recipient = r;
+ }
+
+ void setCypher(Blob&& c) {
+ cypher = std::move(c);
+ }
+
+ /**
+ * Pack part of the data to be signed (must always be done the same way)
+ */
+ Blob getToSign() const {
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ msgpack_pack_to_sign(pk);
+ return {buffer.data(), buffer.data()+buffer.size()};
+ }
+
+ /**
+ * Pack part of the data to be encrypted
+ */
+ Blob getToEncrypt() const {
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ msgpack_pack_to_encrypt(pk);
+ return {buffer.data(), buffer.data()+buffer.size()};
+ }
+
+ /** print value for debugging */
+ OPENDHT_PUBLIC friend std::ostream& operator<< (std::ostream& s, const Value& v);
+
+ std::string toString() const {
+ std::stringstream ss;
+ ss << *this;
+ return ss.str();
+ }
+
+#ifdef OPENDHT_JSONCPP
+ /**
+ * Build a json object from a value
+ * Example:
+ * {
+ * "data":"base64ofdata",
+ * "id":"0", "seq":0,"type":3
+ * }
+ */
+ Json::Value toJson() const;
+#endif
+
+ /** Return the size in bytes used by this value in memory (minimum). */
+ size_t size() const;
+
+ template <typename Packer>
+ void msgpack_pack_to_sign(Packer& pk) const
+ {
+ bool has_owner = owner && *owner;
+ pk.pack_map((user_type.empty()?0:1) + (has_owner?(recipient ? 5 : 4):2));
+ if (has_owner) { // isSigned
+ pk.pack(std::string("seq")); pk.pack(seq);
+ pk.pack(std::string("owner")); owner->msgpack_pack(pk);
+ if (recipient) {
+ pk.pack(std::string("to")); pk.pack(recipient);
+ }
+ }
+ pk.pack(std::string("type")); pk.pack(type);
+ pk.pack(std::string("data")); pk.pack_bin(data.size());
+ pk.pack_bin_body((const char*)data.data(), data.size());
+ if (not user_type.empty()) {
+ pk.pack(std::string("utype")); pk.pack(user_type);
+ }
+ }
+
+ template <typename Packer>
+ void msgpack_pack_to_encrypt(Packer& pk) const
+ {
+ if (isEncrypted()) {
+ pk.pack_bin(cypher.size());
+ pk.pack_bin_body((const char*)cypher.data(), cypher.size());
+ } else {
+ pk.pack_map(isSigned() ? 2 : 1);
+ pk.pack(std::string("body")); msgpack_pack_to_sign(pk);
+ if (isSigned()) {
+ pk.pack(std::string("sig")); pk.pack_bin(signature.size());
+ pk.pack_bin_body((const char*)signature.data(), signature.size());
+ }
+ }
+ }
+
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const
+ {
+ pk.pack_map(2);
+ pk.pack(std::string("id")); pk.pack(id);
+ pk.pack(std::string("dat")); msgpack_pack_to_encrypt(pk);
+ }
+
+ template <typename Packer>
+ void msgpack_pack_fields(const std::set<Value::Field>& fields, Packer& pk) const
+ {
+ for (const auto& field : fields)
+ switch (field) {
+ case Value::Field::Id:
+ pk.pack(static_cast<uint64_t>(id));
+ break;
+ case Value::Field::ValueType:
+ pk.pack(static_cast<uint64_t>(type));
+ break;
+ case Value::Field::OwnerPk:
+ if (owner)
+ owner->msgpack_pack(pk);
+ else
+ InfoHash().msgpack_pack(pk);
+ break;
+ case Value::Field::SeqNum:
+ pk.pack(static_cast<uint64_t>(seq));
+ break;
+ case Value::Field::UserType:
+ pk.pack(user_type);
+ break;
+ default:
+ break;
+ }
+ }
+
+ void msgpack_unpack(msgpack::object o);
+ void msgpack_unpack_body(const msgpack::object& o);
+ Blob getPacked() const {
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack(*this);
+ return {buffer.data(), buffer.data()+buffer.size()};
+ }
+
+ void msgpack_unpack_fields(const std::set<Value::Field>& fields, const msgpack::object& o, unsigned offset);
+
+ Id id {INVALID_ID};
+
+ /**
+ * Public key of the signer.
+ */
+ std::shared_ptr<const crypto::PublicKey> owner {};
+
+ /**
+ * Hash of the recipient (optional).
+ * Should only be present for encrypted values.
+ * Can optionally be present for signed values.
+ */
+ InfoHash recipient {};
+
+ /**
+ * Type of data.
+ */
+ ValueType::Id type {ValueType::USER_DATA.id};
+ Blob data {};
+
+ /**
+ * Custom user-defined type
+ */
+ std::string user_type {};
+
+ /**
+ * Sequence number to avoid replay attacks
+ */
+ uint16_t seq {0};
+
+ /**
+ * Optional signature.
+ */
+ Blob signature {};
+
+ /**
+ * Hold encrypted version of the data.
+ */
+ Blob cypher {};
+
+private:
+ friend class SecureDht;
+ /* Cache for crypto ops */
+ bool signatureChecked {false};
+ bool signatureValid {false};
+ bool decrypted {false};
+ Sp<Value> decryptedValue {};
+};
+
+using ValuesExport = std::pair<InfoHash, Blob>;
+
+/**
+ * @class FieldValue
+ * @brief Describes a value filter.
+ * @details
+ * This structure holds the value for a specified field. Its type can be either
+ * uint64_t, InfoHash or Blob.
+ */
+struct OPENDHT_PUBLIC FieldValue
+{
+ FieldValue() {}
+ FieldValue(Value::Field f, uint64_t int_value) : field(f), intValue(int_value) {}
+ FieldValue(Value::Field f, InfoHash hash_value) : field(f), hashValue(hash_value) {}
+ FieldValue(Value::Field f, Blob blob_value) : field(f), blobValue(blob_value) {}
+
+ bool operator==(const FieldValue& fd) const;
+
+ // accessors
+ Value::Field getField() const { return field; }
+ uint64_t getInt() const { return intValue; }
+ InfoHash getHash() const { return hashValue; }
+ Blob getBlob() const { return blobValue; }
+
+ template <typename Packer>
+ void msgpack_pack(Packer& p) const {
+ p.pack_map(2);
+ p.pack(std::string("f")); p.pack(static_cast<uint8_t>(field));
+
+ p.pack(std::string("v"));
+ switch (field) {
+ case Value::Field::Id:
+ case Value::Field::ValueType:
+ p.pack(intValue);
+ break;
+ case Value::Field::OwnerPk:
+ p.pack(hashValue);
+ break;
+ case Value::Field::UserType:
+ p.pack_bin(blobValue.size());
+ p.pack_bin_body((const char*)blobValue.data(), blobValue.size());
+ break;
+ default:
+ throw msgpack::type_error();
+ }
+ }
+
+ void msgpack_unpack(msgpack::object msg) {
+ hashValue = {};
+ blobValue.clear();
+
+ if (auto f = findMapValue(msg, "f"))
+ field = (Value::Field)f->as<unsigned>();
+ else
+ throw msgpack::type_error();
+
+ auto v = findMapValue(msg, "v");
+ if (not v)
+ throw msgpack::type_error();
+ else
+ switch (field) {
+ case Value::Field::Id:
+ case Value::Field::ValueType:
+ intValue = v->as<decltype(intValue)>();
+ break;
+ case Value::Field::OwnerPk:
+ hashValue = v->as<decltype(hashValue)>();
+ break;
+ case Value::Field::UserType:
+ blobValue = unpackBlob(*v);
+ break;
+ default:
+ throw msgpack::type_error();
+ }
+ }
+
+ Value::Filter getLocalFilter() const;
+
+private:
+ Value::Field field {Value::Field::None};
+ // three possible value types
+ uint64_t intValue {};
+ InfoHash hashValue {};
+ Blob blobValue {};
+};
+
+/**
+ * @class Select
+ * @brief Serializable Value field selection.
+ * @details
+ * This is a container for a list of Value::Field instances. It
+ * describes a complete SELECT query for dht::Value.
+ */
+struct OPENDHT_PUBLIC Select
+{
+ Select() { }
+ Select(const std::string& q_str);
+
+ bool isSatisfiedBy(const Select& os) const;
+
+ /**
+ * Selects a field of type Value::Field.
+ *
+ * @param field the field to require.
+ *
+ * @return the resulting Select instance.
+ */
+ Select& field(Value::Field field) {
+ if (std::find(fieldSelection_.begin(), fieldSelection_.end(), field) == fieldSelection_.end())
+ fieldSelection_.emplace_back(field);
+ return *this;
+ }
+
+ /**
+ * Computes the set of selected fields based on previous field() calls.
+ *
+ * @return the set of fields.
+ */
+ std::set<Value::Field> getSelection() const {
+ return {fieldSelection_.begin(), fieldSelection_.end()};
+ }
+
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const { pk.pack(fieldSelection_); }
+ void msgpack_unpack(const msgpack::object& o) {
+ fieldSelection_ = o.as<decltype(fieldSelection_)>();
+ }
+
+ std::string toString() const {
+ std::stringstream ss;
+ ss << *this;
+ return ss.str();
+ }
+
+ OPENDHT_PUBLIC friend std::ostream& operator<<(std::ostream& s, const dht::Select& q);
+private:
+ std::vector<Value::Field> fieldSelection_ {};
+};
+
+/**
+ * @class Where
+ * @brief Serializable dht::Value filter.
+ * @details
+ * This is a container for a list of FieldValue instances. It describes a
+ * complete WHERE query for dht::Value.
+ */
+struct OPENDHT_PUBLIC Where
+{
+ Where() { }
+ Where(const std::string& q_str);
+
+ bool isSatisfiedBy(const Where& where) const;
+
+ /**
+ * Adds restriction on Value::Id based on the id argument.
+ *
+ * @param id the id.
+ *
+ * @return the resulting Where instance.
+ */
+ Where& id(Value::Id id) {
+ FieldValue fv {Value::Field::Id, id};
+ if (std::find(filters_.begin(), filters_.end(), fv) == filters_.end())
+ filters_.emplace_back(std::move(fv));
+ return *this;
+ }
+
+ /**
+ * Adds restriction on Value::ValueType based on the type argument.
+ *
+ * @param type the value type.
+ *
+ * @return the resulting Where instance.
+ */
+ Where& valueType(ValueType::Id type) {
+ FieldValue fv {Value::Field::ValueType, type};
+ if (std::find(filters_.begin(), filters_.end(), fv) == filters_.end())
+ filters_.emplace_back(std::move(fv));
+ return *this;
+ }
+
+ /**
+ * Adds restriction on Value::OwnerPk based on the owner_pk_hash argument.
+ *
+ * @param owner_pk_hash the owner public key fingerprint.
+ *
+ * @return the resulting Where instance.
+ */
+ Where& owner(InfoHash owner_pk_hash) {
+ FieldValue fv {Value::Field::OwnerPk, owner_pk_hash};
+ if (std::find(filters_.begin(), filters_.end(), fv) == filters_.end())
+ filters_.emplace_back(std::move(fv));
+ return *this;
+ }
+
+ /**
+ * Adds restriction on Value::SeqNum based on the seq_no argument.
+ *
+ * @param seq_no the sequence number.
+ *
+ * @return the resulting Where instance.
+ */
+ Where& seq(uint16_t seq_no) {
+ FieldValue fv {Value::Field::SeqNum, seq_no};
+ if (std::find(filters_.begin(), filters_.end(), fv) == filters_.end())
+ filters_.emplace_back(std::move(fv));
+ return *this;
+ }
+
+ /**
+ * Adds restriction on Value::UserType based on the user_type argument.
+ *
+ * @param user_type the user type.
+ *
+ * @return the resulting Where instance.
+ */
+ Where& userType(std::string user_type) {
+ FieldValue fv {Value::Field::UserType, Blob {user_type.begin(), user_type.end()}};
+ if (std::find(filters_.begin(), filters_.end(), fv) == filters_.end())
+ filters_.emplace_back(std::move(fv));
+ return *this;
+ }
+
+ /**
+ * Computes the Value::Filter based on the list of field value set.
+ *
+ * @return the resulting Value::Filter.
+ */
+ Value::Filter getFilter() const {
+ if (filters_.empty()) return {};
+ std::vector<Value::Filter> fset;
+ fset.reserve(filters_.size());
+ for (const auto& f : filters_) {
+ if (auto lf = f.getLocalFilter())
+ fset.emplace_back(std::move(lf));
+ }
+ return Value::Filter::chainAll(std::move(fset));
+ }
+
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const { pk.pack(filters_); }
+ void msgpack_unpack(const msgpack::object& o) {
+ filters_.clear();
+ filters_ = o.as<decltype(filters_)>();
+ }
+
+ std::string toString() const {
+ std::stringstream ss;
+ ss << *this;
+ return ss.str();
+ }
+
+ bool empty() const {
+ return filters_.empty();
+ }
+
+ OPENDHT_PUBLIC friend std::ostream& operator<<(std::ostream& s, const dht::Where& q);
+
+private:
+ std::vector<FieldValue> filters_;
+};
+
+/**
+ * @class Query
+ * @brief Describes a query destined to another peer.
+ * @details
+ * This class describes the list of filters on field values and the fields
+ * themselves to include in the peer response to a GET operation. See
+ * FieldValue.
+ */
+struct OPENDHT_PUBLIC Query
+{
+ static const std::string QUERY_PARSE_ERROR;
+
+ Query(Select s = {}, Where w = {}, bool none = false) : select(s), where(w), none(none) { };
+
+ /**
+ * Initializes a query based on a SQL-ish formatted string. The abstract
+ * form of such a string is the following:
+ *
+ * [SELECT $field$ [WHERE $field$=$value$]]
+ *
+ * where
+ *
+ * - $field$ = *|id|value_type|owner_pk|user_type
+ * - $value$ = $string$|$integer$
+ * - $string$: a simple string WITHOUT SPACES.
+ * - $integer$: a simple integer.
+ */
+ Query(std::string q_str) {
+ auto pos_W = q_str.find("WHERE");
+ auto pos_w = q_str.find("where");
+ auto pos = std::min(pos_W != std::string::npos ? pos_W : q_str.size(),
+ pos_w != std::string::npos ? pos_w : q_str.size());
+ select = q_str.substr(0, pos);
+ where = q_str.substr(pos, q_str.size()-pos);
+ }
+
+ /**
+ * Tell if the query is satisfied by another query.
+ */
+ bool isSatisfiedBy(const Query& q) const;
+
+ template <typename Packer>
+ void msgpack_pack(Packer& pk) const {
+ pk.pack_map(2);
+ pk.pack(std::string("s")); pk.pack(select); /* packing field selectors */
+ pk.pack(std::string("w")); pk.pack(where); /* packing filters */
+ }
+
+ void msgpack_unpack(const msgpack::object& o);
+
+ std::string toString() const {
+ std::stringstream ss;
+ ss << *this;
+ return ss.str();
+ }
+
+ OPENDHT_PUBLIC friend std::ostream& operator<<(std::ostream& s, const dht::Query& q) {
+ return s << "Query[" << q.select << " " << q.where << "]";
+ }
+
+ Select select {};
+ Where where {};
+ bool none {false}; /* When true, any query satisfies this. */
+};
+
+/*!
+ * @class FieldValueIndex
+ * @brief An index for field values.
+ * @details
+ * This structures is meant to manipulate a subset of fields normally contained
+ * in Value.
+ */
+struct OPENDHT_PUBLIC FieldValueIndex {
+ FieldValueIndex() {}
+ FieldValueIndex(const Value& v, Select s = {});
+ /**
+ * Tells if all the fields of this are contained in the other
+ * FieldValueIndex with the same value.
+ *
+ * @param other The other FieldValueIndex instance.
+ */
+ bool containedIn(const FieldValueIndex& other) const;
+
+ OPENDHT_PUBLIC friend std::ostream& operator<<(std::ostream& os, const FieldValueIndex& fvi);
+
+ void msgpack_unpack_fields(const std::set<Value::Field>& fields,
+ const msgpack::object& o,
+ unsigned offset);
+
+ std::map<Value::Field, FieldValue> index {};
+};
+
+template <typename T,
+ typename std::enable_if<std::is_base_of<Value::SerializableBase, T>::value, T>::type* = nullptr>
+Value::Filter
+getFilterSet(Value::Filter f)
+{
+ return Value::Filter::chain({
+ Value::TypeFilter(T::TYPE),
+ T::getFilter(),
+ f
+ });
+}
+
+template <typename T,
+ typename std::enable_if<!std::is_base_of<Value::SerializableBase, T>::value, T>::type* = nullptr>
+Value::Filter
+getFilterSet(Value::Filter f)
+{
+ return f;
+}
+
+template <typename T,
+ typename std::enable_if<std::is_base_of<Value::SerializableBase, T>::value, T>::type* = nullptr>
+Value::Filter
+getFilterSet()
+{
+ return Value::Filter::chain({
+ Value::TypeFilter(T::TYPE),
+ T::getFilter()
+ });
+}
+
+template <typename T,
+ typename std::enable_if<!std::is_base_of<Value::SerializableBase, T>::value, T>::type* = nullptr>
+Value::Filter
+getFilterSet()
+{
+ return Value::AllFilter();
+}
+
+template <class T>
+std::vector<T>
+unpackVector(const std::vector<std::shared_ptr<Value>>& vals) {
+ std::vector<T> ret;
+ ret.reserve(vals.size());
+ for (const auto& v : vals) {
+ try {
+ ret.emplace_back(Value::unpack<T>(*v));
+ } catch (const std::exception&) {}
+ }
+ return ret;
+}
+
+#ifdef OPENDHT_JSONCPP
+uint64_t unpackId(const Json::Value& json, const std::string& key);
+#endif
+
+}
+
+MSGPACK_ADD_ENUM(dht::Value::Field)
--- /dev/null
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
+#
+# DESCRIPTION
+#
+# Check for baseline language coverage in the compiler for the specified
+# version of the C++ standard. If necessary, add switches to CXX and
+# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard)
+# or '14' (for the C++14 standard).
+#
+# The second argument, if specified, indicates whether you insist on an
+# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
+# -std=c++11). If neither is specified, you get whatever works, with
+# preference for an extended mode.
+#
+# The third argument, if specified 'mandatory' or if left unspecified,
+# indicates that baseline support for the specified C++ standard is
+# required and that the macro should error out if no mode with that
+# support is found. If specified 'optional', then configuration proceeds
+# regardless, after defining HAVE_CXX${VERSION} if and only if a
+# supporting mode is found.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
+# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
+# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
+# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
+# Copyright (c) 2015 Paul Norman <penorman@mac.com>
+# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 4
+
+dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
+dnl (serial version number 13).
+
+AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
+ m4_if([$1], [11], [],
+ [$1], [14], [],
+ [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])],
+ [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$2], [], [],
+ [$2], [ext], [],
+ [$2], [noext], [],
+ [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
+ [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
+ AC_LANG_PUSH([C++])dnl
+ ac_success=no
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
+ ax_cv_cxx_compile_cxx$1,
+ [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [ax_cv_cxx_compile_cxx$1=yes],
+ [ax_cv_cxx_compile_cxx$1=no])])
+ if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
+ ac_success=yes
+ fi
+
+ m4_if([$2], [noext], [], [dnl
+ if test x$ac_success = xno; then
+ for switch in -std=gnu++$1 -std=gnu++0x; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+
+ m4_if([$2], [ext], [], [dnl
+ if test x$ac_success = xno; then
+ dnl HP's aCC needs +std=c++11 according to:
+ dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
+ dnl Cray's crayCC needs "-h std=c++11"
+ for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+ AC_LANG_POP([C++])
+ if test x$ax_cxx_compile_cxx$1_required = xtrue; then
+ if test x$ac_success = xno; then
+ AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
+ fi
+ fi
+ if test x$ac_success = xno; then
+ HAVE_CXX$1=0
+ AC_MSG_NOTICE([No compiler with C++$1 support was found])
+ else
+ HAVE_CXX$1=1
+ AC_DEFINE(HAVE_CXX$1,1,
+ [define if the compiler supports basic C++$1 syntax])
+ fi
+ AC_SUBST(HAVE_CXX$1)
+])
+
+
+dnl Test body for checking C++11 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+)
+
+
+dnl Test body for checking C++14 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+)
+
+
+dnl Tests for new features in C++11
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
+
+// If the compiler admits that it is not ready for C++11, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201103L
+
+#error "This is not a C++11 compiler"
+
+#else
+
+namespace cxx11
+{
+
+ namespace test_static_assert
+ {
+
+ template <typename T>
+ struct check
+ {
+ static_assert(sizeof(int) <= sizeof(T), "not big enough");
+ };
+
+ }
+
+ namespace test_final_override
+ {
+
+ struct Base
+ {
+ virtual void f() {}
+ };
+
+ struct Derived : public Base
+ {
+ virtual void f() override {}
+ };
+
+ }
+
+ namespace test_double_right_angle_brackets
+ {
+
+ template < typename T >
+ struct check {};
+
+ typedef check<void> single_type;
+ typedef check<check<void>> double_type;
+ typedef check<check<check<void>>> triple_type;
+ typedef check<check<check<check<void>>>> quadruple_type;
+
+ }
+
+ namespace test_decltype
+ {
+
+ int
+ f()
+ {
+ int a = 1;
+ decltype(a) b = 2;
+ return a + b;
+ }
+
+ }
+
+ namespace test_type_deduction
+ {
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static const bool value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static const bool value = true;
+ };
+
+ template < typename T1, typename T2 >
+ auto
+ add(T1 a1, T2 a2) -> decltype(a1 + a2)
+ {
+ return a1 + a2;
+ }
+
+ int
+ test(const int c, volatile int v)
+ {
+ static_assert(is_same<int, decltype(0)>::value == true, "");
+ static_assert(is_same<int, decltype(c)>::value == false, "");
+ static_assert(is_same<int, decltype(v)>::value == false, "");
+ auto ac = c;
+ auto av = v;
+ auto sumi = ac + av + 'x';
+ auto sumf = ac + av + 1.0;
+ static_assert(is_same<int, decltype(ac)>::value == true, "");
+ static_assert(is_same<int, decltype(av)>::value == true, "");
+ static_assert(is_same<int, decltype(sumi)>::value == true, "");
+ static_assert(is_same<int, decltype(sumf)>::value == false, "");
+ static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
+ return (sumf > 0.0) ? sumi : add(c, v);
+ }
+
+ }
+
+ namespace test_noexcept
+ {
+
+ int f() { return 0; }
+ int g() noexcept { return 0; }
+
+ static_assert(noexcept(f()) == false, "");
+ static_assert(noexcept(g()) == true, "");
+
+ }
+
+ namespace test_constexpr
+ {
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
+ {
+ return *s ? strlen_c_r(s + 1, acc + 1) : acc;
+ }
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c(const CharT *const s) noexcept
+ {
+ return strlen_c_r(s, 0UL);
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("1") == 1UL, "");
+ static_assert(strlen_c("example") == 7UL, "");
+ static_assert(strlen_c("another\0example") == 7UL, "");
+
+ }
+
+ namespace test_rvalue_references
+ {
+
+ template < int N >
+ struct answer
+ {
+ static constexpr int value = N;
+ };
+
+ answer<1> f(int&) { return answer<1>(); }
+ answer<2> f(const int&) { return answer<2>(); }
+ answer<3> f(int&&) { return answer<3>(); }
+
+ void
+ test()
+ {
+ int i = 0;
+ const int c = 0;
+ static_assert(decltype(f(i))::value == 1, "");
+ static_assert(decltype(f(c))::value == 2, "");
+ static_assert(decltype(f(0))::value == 3, "");
+ }
+
+ }
+
+ namespace test_uniform_initialization
+ {
+
+ struct test
+ {
+ static const int zero {};
+ static const int one {1};
+ };
+
+ static_assert(test::zero == 0, "");
+ static_assert(test::one == 1, "");
+
+ }
+
+ namespace test_lambdas
+ {
+
+ void
+ test1()
+ {
+ auto lambda1 = [](){};
+ auto lambda2 = lambda1;
+ lambda1();
+ lambda2();
+ }
+
+ int
+ test2()
+ {
+ auto a = [](int i, int j){ return i + j; }(1, 2);
+ auto b = []() -> int { return '0'; }();
+ auto c = [=](){ return a + b; }();
+ auto d = [&](){ return c; }();
+ auto e = [a, &b](int x) mutable {
+ const auto identity = [](int y){ return y; };
+ for (auto i = 0; i < a; ++i)
+ a += b--;
+ return x + identity(a + b);
+ }(0);
+ return a + b + c + d + e;
+ }
+
+ int
+ test3()
+ {
+ const auto nullary = [](){ return 0; };
+ const auto unary = [](int x){ return x; };
+ using nullary_t = decltype(nullary);
+ using unary_t = decltype(unary);
+ const auto higher1st = [](nullary_t f){ return f(); };
+ const auto higher2nd = [unary](nullary_t f1){
+ return [unary, f1](unary_t f2){ return f2(unary(f1())); };
+ };
+ return higher1st(nullary) + higher2nd(nullary)(unary);
+ }
+
+ }
+
+ namespace test_variadic_templates
+ {
+
+ template <int...>
+ struct sum;
+
+ template <int N0, int... N1toN>
+ struct sum<N0, N1toN...>
+ {
+ static constexpr auto value = N0 + sum<N1toN...>::value;
+ };
+
+ template <>
+ struct sum<>
+ {
+ static constexpr auto value = 0;
+ };
+
+ static_assert(sum<>::value == 0, "");
+ static_assert(sum<1>::value == 1, "");
+ static_assert(sum<23>::value == 23, "");
+ static_assert(sum<1, 2>::value == 3, "");
+ static_assert(sum<5, 5, 11>::value == 21, "");
+ static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
+
+ }
+
+ // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
+ // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
+ // because of this.
+ namespace test_template_alias_sfinae
+ {
+
+ struct foo {};
+
+ template<typename T>
+ using member = typename T::member_type;
+
+ template<typename T>
+ void func(...) {}
+
+ template<typename T>
+ void func(member<T>*) {}
+
+ void test();
+
+ void test() { func<foo>(0); }
+
+ }
+
+} // namespace cxx11
+
+#endif // __cplusplus >= 201103L
+
+]])
+
+
+dnl Tests for new features in C++14
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
+
+// C++14 conformance test program (appears derived from the autoconf-archive
+// AX_CXX_COMPILE_STDCXX macro -- TODO confirm provenance).  Each namespace
+// below exercises one C++14 language feature; the program only needs to
+// COMPILE under the probed flags, it is never executed.
+
+// If the compiler admits that it is not ready for C++14, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201402L
+
+#error "This is not a C++14 compiler"
+
+#else
+
+namespace cxx14
+{
+
+  // Generic lambdas (auto parameters) combined with parameter packs.
+  namespace test_polymorphic_lambdas
+  {
+
+    int
+    test()
+    {
+      const auto lambda = [](auto&&... args){
+        const auto istiny = [](auto x){
+          return (sizeof(x) == 1UL) ? 1 : 0;
+        };
+        const int aretiny[] = { istiny(args)... };
+        return aretiny[0];
+      };
+      return lambda(1, 1L, 1.0f, '1');
+    }
+
+  }
+
+  // 0b... binary integer literals.
+  namespace test_binary_literals
+  {
+
+    constexpr auto ivii = 0b0000000000101010;
+    static_assert(ivii == 42, "wrong value");
+
+  }
+
+  // Relaxed constexpr: loops and local variables in a constexpr function.
+  namespace test_generalized_constexpr
+  {
+
+    template < typename CharT >
+    constexpr unsigned long
+    strlen_c(const CharT *const s) noexcept
+    {
+      auto length = 0UL;
+      for (auto p = s; *p; ++p)
+        ++length;
+      return length;
+    }
+
+    static_assert(strlen_c("") == 0UL, "");
+    static_assert(strlen_c("x") == 1UL, "");
+    static_assert(strlen_c("test") == 4UL, "");
+    static_assert(strlen_c("another\0test") == 7UL, "");
+
+  }
+
+  // Lambda init-captures ([a = expr]).
+  namespace test_lambda_init_capture
+  {
+
+    int
+    test()
+    {
+      auto x = 0;
+      const auto lambda1 = [a = x](int b){ return a + b; };
+      const auto lambda2 = [a = lambda1(x)](){ return a; };
+      return lambda2();
+    }
+
+  }
+
+  // Digit separators ("seperators" kept as-is: it is only an identifier).
+  namespace test_digit_seperators
+  {
+
+    // NOTE(review): the value is one hundred million despite being named
+    // ten_million; the static_assert below is self-consistent, so the
+    // feature test is still valid.
+    constexpr auto ten_million = 100'000'000;
+    static_assert(ten_million == 100000000, "");
+
+  }
+
+  // auto / decltype(auto) return type deduction.
+  namespace test_return_type_deduction
+  {
+
+    auto f(int& x) { return x; }
+    decltype(auto) g(int& x) { return x; }
+
+    template < typename T1, typename T2 >
+    struct is_same
+    {
+      static constexpr auto value = false;
+    };
+
+    template < typename T >
+    struct is_same<T, T>
+    {
+      static constexpr auto value = true;
+    };
+
+    int
+    test()
+    {
+      auto x = 0;
+      static_assert(is_same<int, decltype(f(x))>::value, "");
+      static_assert(is_same<int&, decltype(g(x))>::value, "");
+      return x;
+    }
+
+  }
+
+} // namespace cxx14
+
+#endif // __cplusplus >= 201402L
+
+]])
--- /dev/null
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+Name: OpenDHT
+Description: C++ Distributed Hash Table library
+Version: @VERSION@
+Libs: -L${libdir} -lopendht
+Libs.private: -lpthread
+Requires.private: gnutls >= 3.1@argon2_lib@
+Cflags: -I${includedir}
--- /dev/null
+
+# Build and install the Python (Cython) bindings.
+# Expose the current dirs under the names substituted into setup.py.in.
+set(CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+set(CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
+
+configure_file(setup.py.in setup.py)
+
+# Always-built target running the distutils/Cython build after the opendht
+# library target.  NOTE(review): `python3` is hardcoded rather than using
+# find_package(Python3) / Python3::Interpreter -- confirm availability on
+# all target platforms.
+add_custom_target(python ALL
+    COMMAND python3 setup.py build
+    DEPENDS opendht opendht_cpp.pxd opendht.pyx)
+
+# Run setup.py install at install time; $ENV{DESTDIR} honors staged installs.
+install(CODE "execute_process(COMMAND python3 setup.py install --root=\$ENV{DESTDIR}/ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})")
+if (OPENDHT_TOOLS)
+    install(PROGRAMS tools/dhtcluster.py DESTINATION ${CMAKE_INSTALL_BINDIR} RENAME dhtcluster)
+endif()
\ No newline at end of file
--- /dev/null
+if USE_CYTHON
+
+# Cython sources are shipped in the tarball but not installed as headers.
+noinst_HEADERS = \
+	opendht.pyx \
+	opendht_cpp.pxd
+
+# File recording everything setup.py installs, used for uninstall/clean.
+PYTHON_INSTALL_RECORD = $(builddir)/install_record.txt
+
+# Build the extension in place against the freshly built libopendht.
+# NOTE(review): this stamp rule has no prerequisites, so edits to
+# opendht.pyx or setup.py will not trigger a rebuild -- confirm intended.
+pybuild.stamp:
+	LDFLAGS="-L$(top_srcdir)/src/.libs" $(PYTHON) setup.py build_ext --inplace
+	echo stamp > pybuild.stamp
+
+CLEANFILES = pybuild.stamp
+
+all-local: pybuild.stamp
+clean-local:
+	rm -rf $(builddir)/build $(builddir)/*.so $(PYTHON_INSTALL_RECORD)
+
+install-exec-local:
+	$(PYTHON) setup.py install --root=$(DESTDIR)/ --record $(PYTHON_INSTALL_RECORD)
+	rm -rf $(builddir)/build
+
+# Uninstall is only possible through pip, when available.
+if HAVE_PIP
+uninstall-local:
+	/usr/bin/yes | $(PIP) uninstall $(PACKAGE)
+endif
+
+endif
+
--- /dev/null
+# distutils: language = c++
+# distutils: extra_compile_args = -std=c++11
+# distutils: include_dirs = ../../include
+# distutils: library_dirs = ../../src
+# distutils: libraries = opendht gnutls
+# cython: language_level=3
+#
+# Copyright (c) 2015-2016 Savoir-faire Linux Inc.
+# Author(s): Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
+# Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+# Simon Désaulniers <sim.desaulniers@gmail.com>
+#
+# This wrapper is written for Cython 0.22
+#
+# This file is part of OpenDHT Python Wrapper.
+#
+# OpenDHT Python Wrapper is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OpenDHT Python Wrapper is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OpenDHT Python Wrapper. If not, see <https://www.gnu.org/licenses/>.
+
+from libcpp.map cimport map as map
+from libcpp cimport bool
+from libcpp.utility cimport pair
+from libcpp.string cimport string
+from libcpp.memory cimport shared_ptr
+
+from cython.parallel import parallel, prange
+from cython.operator cimport dereference as deref, preincrement as inc, predecrement as dec
+from cpython cimport ref
+
+cimport opendht_cpp as cpp
+
+import threading
+
+cdef inline void lookup_callback(cpp.vector[cpp.shared_ptr[cpp.IndexValue]]* values, cpp.Prefix* p, void *user_data) with gil:
+    # C trampoline for Pht.lookup: re-acquires the GIL, wraps each C++
+    # IndexValue into a Python IndexValue and invokes the user's 'lookup'
+    # callback with (values, prefix-string).
+    cbs = <object>user_data
+    if 'lookup' in cbs and cbs['lookup']:
+        vals = []
+        for val in deref(values):
+            v = IndexValue()
+            v._value = val
+            vals.append(v)
+        cbs['lookup'](vals, p.toString())
+
+cdef inline void shutdown_callback(void* user_data) with gil:
+    # C trampoline for DhtRunner.shutdown.  Releases the reference taken
+    # with Py_INCREF when the callback dict was handed to C++.
+    cbs = <object>user_data
+    if 'shutdown' in cbs and cbs['shutdown']:
+        cbs['shutdown']()
+    ref.Py_DECREF(cbs)
+
+cdef inline bool get_callback(shared_ptr[cpp.Value] value, void *user_data) with gil:
+    # C trampoline for get/listen.  Applies the optional Python-side
+    # 'filter' first; filtered-out values return True (continue) without
+    # invoking the 'get' callback.  The 'get' callback's return value
+    # decides whether the operation continues.
+    cbs = <object>user_data
+    cb = cbs['get']
+    f = cbs['filter'] if 'filter' in cbs else None
+    pv = Value()
+    pv._value = value
+    return cb(pv) if not f or f(pv) else True
+
+cdef inline void done_callback(bool done, cpp.vector[shared_ptr[cpp.Node]]* nodes, void *user_data) with gil:
+    # C trampoline for operations reporting the contacted nodes.  Wraps each
+    # node into a NodeEntry, calls the user's 'done' callback, then drops
+    # the reference taken when the dict was handed to C++.
+    node_ids = []
+    for n in deref(nodes):
+        h = NodeEntry()
+        h._v.first = n.get().getId()
+        h._v.second = n
+        node_ids.append(h)
+    cbs = <object>user_data
+    if 'done' in cbs and cbs['done']:
+        cbs['done'](done, node_ids)
+    ref.Py_DECREF(cbs)
+
+cdef inline void done_callback_simple(bool done, void *user_data) with gil:
+    # C trampoline for simple completion callbacks (success flag only).
+    # Drops the reference taken when the dict was handed to C++.
+    cbs = <object>user_data
+    if 'done' in cbs and cbs['done']:
+        cbs['done'](done)
+    ref.Py_DECREF(cbs)
+
+cdef class _WithID(object):
+    # Mixin for objects identified by an InfoHash: repr/str are derived
+    # from the subclass-provided getId().
+    def __repr__(self):
+        return "<{} '{}'>".format(self.__class__.__name__, str(self))
+    def __str__(self):
+        return self.getId().toString().decode()
+
+cdef class InfoHash(_WithID):
+    """A 160-bit hash, used as DHT key and node identifier."""
+    cdef cpp.InfoHash _infohash
+    def __cinit__(self, bytes str=b''):
+        self._infohash = cpp.InfoHash(str) if str else cpp.InfoHash()
+    def __bool__(InfoHash self):
+        return <bool>self._infohash
+    def __richcmp__(InfoHash self, InfoHash other, int op):
+        # All six rich comparisons, built from the C++ operators < and ==.
+        # (Previously only <, <= and == were handled; !=, > and >= fell
+        # through to NotImplemented.)
+        if op == 0:   # <
+            return self._infohash < other._infohash
+        if op == 1:   # <=
+            return self._infohash < other._infohash or self._infohash == other._infohash
+        if op == 2:   # ==
+            return self._infohash == other._infohash
+        if op == 3:   # !=
+            return not (self._infohash == other._infohash)
+        if op == 4:   # >
+            return other._infohash < self._infohash
+        if op == 5:   # >=
+            return other._infohash < self._infohash or self._infohash == other._infohash
+        return NotImplemented
+    def getBit(InfoHash self, bit):
+        """Return the value of the given bit of the hash."""
+        return self._infohash.getBit(bit)
+    def setBit(InfoHash self, bit, b):
+        """Set the given bit of the hash to b."""
+        self._infohash.setBit(bit, b)
+    def getId(InfoHash self):
+        return self
+    def toString(InfoHash self):
+        """Hex representation as bytes."""
+        return self._infohash.toString()
+    def toFloat(InfoHash self):
+        """Hash mapped to [0, 1) as a float."""
+        return self._infohash.toFloat()
+    @staticmethod
+    def commonBits(InfoHash a, InfoHash b):
+        """Number of leading bits shared by a and b."""
+        return cpp.InfoHash.commonBits(a._infohash, b._infohash)
+    @staticmethod
+    def get(str key):
+        """Hash an arbitrary string key into an InfoHash."""
+        h = InfoHash()
+        h._infohash = cpp.InfoHash.get(key.encode())
+        return h
+    @staticmethod
+    def getRandom():
+        """Return a random InfoHash."""
+        h = InfoHash()
+        h._infohash = cpp.InfoHash.getRandom()
+        return h
+
+cdef class SockAddr(object):
+    # Thin wrapper over the C++ dht::SockAddr value type.
+    cdef cpp.SockAddr _addr
+    def toString(SockAddr self):
+        # Address as bytes (C++ std::string).
+        return self._addr.toString()
+    def getPort(SockAddr self):
+        return self._addr.getPort()
+    def getFamily(SockAddr self):
+        return self._addr.getFamily()
+    def setPort(SockAddr self, cpp.in_port_t port):
+        return self._addr.setPort(port)
+    def setFamily(SockAddr self, cpp.sa_family_t af):
+        return self._addr.setFamily(af)
+    def isLoopback(SockAddr self):
+        return self._addr.isLoopback()
+    def isPrivate(SockAddr self):
+        return self._addr.isPrivate()
+    def isUnspecified(SockAddr self):
+        return self._addr.isUnspecified()
+    def __str__(self):
+        return self.toString().decode()
+    def __repr__(self):
+        return "<%s '%s'>" % (self.__class__.__name__, str(self))
+
+cdef class Node(_WithID):
+    # Wrapper around a shared_ptr to a C++ dht::Node.
+    cdef shared_ptr[cpp.Node] _node
+    def getId(self):
+        # Node id as an InfoHash.
+        h = InfoHash()
+        h._infohash = self._node.get().getId()
+        return h
+    def getAddr(self):
+        # Node address string as bytes.
+        return self._node.get().getAddrStr()
+    def isExpired(self):
+        # Whether the node is considered expired (unresponsive).
+        return self._node.get().isExpired()
+
+cdef class NodeEntry(_WithID):
+    # A (hash, node) pair, as produced by done callbacks and NodeSet
+    # iteration.
+    cdef cpp.pair[cpp.InfoHash, shared_ptr[cpp.Node]] _v
+    def getId(self):
+        h = InfoHash()
+        h._infohash = self._v.first
+        return h
+    def getNode(self):
+        n = Node()
+        n._node = self._v.second
+        return n
+
+cdef class Query(object):
+    # Wrapper over dht::Query (a Select + Where pair).
+    cdef cpp.Query _query
+    def __cinit__(self, str q_str=''):
+        self._query = cpp.Query(q_str.encode())
+    def __str__(self):
+        return self._query.toString().decode()
+    def buildFrom(self, Select s, Where w):
+        # Replace this query with one built from a Select and a Where.
+        self._query = cpp.Query(s._select, w._where)
+    def isSatisfiedBy(self, Query q):
+        return self._query.isSatisfiedBy(q._query)
+
+cdef class Select(object):
+    # Wrapper over dht::Query's Select (field projection) component.
+    cdef cpp.Select _select
+    def __cinit__(self, str q_str=None):
+        # Parse the query string when provided, otherwise default-construct.
+        self._select = cpp.Select(q_str.encode()) if q_str else cpp.Select()
+    def __str__(self):
+        return self._select.toString().decode()
+    def isSatisfiedBy(self, Select os):
+        return self._select.isSatisfiedBy(os._select)
+    def field(self, int field):
+        # Add a field to the selection; returns self for chaining.
+        self._select.field(<cpp.Field> field)
+        return self
+
+cdef class Where(object):
+    # Wrapper over dht::Query's Where (value filter) component.
+    # All setters return self so conditions can be chained.
+    cdef cpp.Where _where
+    def __cinit__(self, str q_str=None):
+        if q_str:
+            self._where = cpp.Where(q_str.encode())
+        else:
+            self._where = cpp.Where()
+    def __str__(self):
+        return self._where.toString().decode()
+    def isSatisfiedBy(self, Where where):
+        return self._where.isSatisfiedBy(where._where)
+    def id(self, cpp.uint64_t id):
+        # Filter on value id.
+        self._where.id(id)
+        return self
+    def valueType(self, cpp.uint16_t type):
+        # Filter on value type.
+        self._where.valueType(type)
+        return self
+    def owner(self, InfoHash owner_pk_hash):
+        # Filter on the hash of the owner's public key.
+        self._where.owner(owner_pk_hash._infohash)
+        return self
+    def seq(self, cpp.uint16_t seq_no):
+        # Filter on sequence number.
+        self._where.seq(seq_no)
+        return self
+    def userType(self, str user_type):
+        # Filter on user type string.
+        self._where.userType(user_type.encode())
+        return self
+
+cdef class Value(object):
+    # Wrapper around a shared_ptr to a C++ dht::Value (the data stored on
+    # the DHT).
+    cdef shared_ptr[cpp.Value] _value
+    def __init__(self, bytes val=b''):
+        self._value.reset(new cpp.Value(val, len(val)))
+    def __str__(self):
+        return self._value.get().toString().decode()
+    property owner:
+        # Owner's public key hash (read-only).
+        def __get__(self):
+            h = InfoHash()
+            h._infohash = self._value.get().owner.get().getId()
+            return h
+    property recipient:
+        # Optional recipient hash (read/write).
+        def __get__(self):
+            h = InfoHash()
+            h._infohash = self._value.get().recipient
+            return h
+        def __set__(self, InfoHash h):
+            self._value.get().recipient = h._infohash
+    property data:
+        # Raw payload as bytes; copies in/out of the C++ blob.
+        def __get__(self):
+            return string(<char*>self._value.get().data.data(), self._value.get().data.size())
+        def __set__(self, bytes value):
+            self._value.get().data = value
+    property user_type:
+        # Application-defined type string.
+        def __get__(self):
+            return self._value.get().user_type.decode()
+        def __set__(self, str t):
+            self._value.get().user_type = t.encode()
+    property id:
+        # 64-bit value id.
+        def __get__(self):
+            return self._value.get().id
+        def __set__(self, cpp.uint64_t value):
+            self._value.get().id = value
+    property size:
+        # Total serialized size of the value (read-only).
+        def __get__(self):
+            return self._value.get().size()
+
+cdef class NodeSetIter(object):
+    # Iterator over a NodeSet, yielding NodeEntry objects.
+    # Holds a raw pointer into the NodeSet's map: the NodeSet must outlive
+    # the iterator and must not be modified while iterating.
+    cdef map[cpp.InfoHash, shared_ptr[cpp.Node]]* _nodes
+    cdef map[cpp.InfoHash, shared_ptr[cpp.Node]].iterator _curIter
+    def __init__(self, NodeSet s):
+        self._nodes = &s._nodes
+        self._curIter = self._nodes.begin()
+    def __next__(self):
+        if self._curIter == self._nodes.end():
+            raise StopIteration
+        h = NodeEntry()
+        h._v = deref(self._curIter)
+        inc(self._curIter)
+        return h
+
+cdef class NodeSet(object):
+    # An ordered set of nodes keyed by their InfoHash (std::map).
+    cdef map[cpp.InfoHash, shared_ptr[cpp.Node]] _nodes
+    def size(self):
+        return self._nodes.size()
+    def insert(self, NodeEntry l):
+        # Returns True if the entry was newly inserted.
+        return self._nodes.insert(l._v).second
+    def extend(self, li):
+        # Insert every NodeEntry from an iterable.
+        for n in li:
+            self.insert(n)
+    def first(self):
+        """Return the smallest node hash in the set; raises IndexError if empty."""
+        if self._nodes.empty():
+            raise IndexError()
+        h = InfoHash()
+        h._infohash = deref(self._nodes.begin()).first
+        return h
+    def last(self):
+        """Return the largest node hash in the set; raises IndexError if empty."""
+        if self._nodes.empty():
+            raise IndexError()
+        h = InfoHash()
+        h._infohash = deref(dec(self._nodes.end())).first
+        return h
+    def __str__(self):
+        # One "hash address" line per node.
+        s = ''
+        cdef map[cpp.InfoHash, shared_ptr[cpp.Node]].iterator it = self._nodes.begin()
+        while it != self._nodes.end():
+            s += deref(it).first.toString().decode() + ' ' + deref(it).second.get().getAddrStr().decode() + '\n'
+            inc(it)
+        return s
+    def __iter__(self):
+        return NodeSetIter(self)
+
+cdef class PrivateKey(_WithID):
+    # Wrapper around a shared_ptr to a C++ crypto::PrivateKey.
+    cdef shared_ptr[cpp.PrivateKey] _key
+    def getId(self):
+        # Id of the matching public key.
+        h = InfoHash()
+        h._infohash = self._key.get().getPublicKey().getId()
+        return h
+    def getPublicKey(self):
+        pk = PublicKey()
+        pk._key = self._key.get().getPublicKey()
+        return pk
+    def decrypt(self, bytes dat):
+        """Decrypt a ciphertext (bytes) with this key; returns plaintext bytes."""
+        cdef size_t d_len = len(dat)
+        cdef cpp.uint8_t* d_ptr = <cpp.uint8_t*>dat
+        cdef cpp.Blob indat
+        # Copy the Python bytes into a C++ vector<uint8_t>.
+        indat.assign(d_ptr, <cpp.uint8_t*>(d_ptr + d_len))
+        cdef cpp.Blob decrypted = self._key.get().decrypt(indat)
+        cdef char* decrypted_c_str = <char *>decrypted.data()
+        cdef Py_ssize_t length = decrypted.size()
+        # Slicing a char* copies into a new bytes object.
+        return decrypted_c_str[:length]
+    def __str__(self):
+        return self.getId().toString().decode()
+    @staticmethod
+    def generate():
+        """Generate a new RSA private key."""
+        k = PrivateKey()
+        k._key = cpp.make_shared[cpp.PrivateKey](cpp.PrivateKey.generate())
+        return k
+    @staticmethod
+    def generateEC():
+        """Generate a new EC private key."""
+        k = PrivateKey()
+        k._key = cpp.make_shared[cpp.PrivateKey](cpp.PrivateKey.generateEC())
+        return k
+
+cdef class PublicKey(_WithID):
+    # Wrapper over the C++ crypto::PublicKey value type.
+    cdef cpp.PublicKey _key
+    def getId(self):
+        h = InfoHash()
+        h._infohash = self._key.getId()
+        return h
+    def encrypt(self, bytes dat):
+        """Encrypt plaintext bytes for the owner of this key; returns ciphertext bytes."""
+        cdef size_t d_len = len(dat)
+        cdef cpp.uint8_t* d_ptr = <cpp.uint8_t*>dat
+        cdef cpp.Blob indat
+        # Copy the Python bytes into a C++ vector<uint8_t>.
+        indat.assign(d_ptr, <cpp.uint8_t*>(d_ptr + d_len))
+        cdef cpp.Blob encrypted = self._key.encrypt(indat)
+        cdef char* encrypted_c_str = <char *>encrypted.data()
+        cdef Py_ssize_t length = encrypted.size()
+        return encrypted_c_str[:length]
+
+cdef class Certificate(_WithID):
+    # Wrapper around a shared_ptr to a C++ crypto::Certificate.
+    cdef shared_ptr[cpp.Certificate] _cert
+    def __init__(self, bytes dat = None):
+        # Optionally parse a PEM-encoded certificate.
+        if dat:
+            self._cert = cpp.make_shared[cpp.Certificate](<cpp.string>dat)
+    def getId(self):
+        # Returns a default (zero) InfoHash when no certificate is held.
+        h = InfoHash()
+        if self._cert:
+            h._infohash = self._cert.get().getId()
+        return h
+    def toString(self):
+        # PEM representation, decoded to str.
+        return self._cert.get().toString().decode()
+    def getName(self):
+        return self._cert.get().getName()
+    def revoke(self, PrivateKey k, Certificate c):
+        # Revoke certificate c, signing with key k.
+        self._cert.get().revoke(deref(k._key.get()), deref(c._cert.get()));
+    def __bytes__(self):
+        return self._cert.get().toString() if self._cert else b''
+    property issuer:
+        # Issuing (CA) certificate, if any.
+        def __get__(self):
+            c = Certificate()
+            c._cert = self._cert.get().issuer
+            return c;
+    @staticmethod
+    def generate(PrivateKey k, str name, Identity i = Identity(), bool is_ca = False):
+        """Generate a certificate for key k, optionally signed by identity i."""
+        c = Certificate()
+        c._cert = cpp.make_shared[cpp.Certificate](cpp.Certificate.generate(deref(k._key.get()), name.encode(), i._id, is_ca))
+        return c
+
+cdef class VerifyResult(object):
+    # Result of TrustList.verify(): truthy when the certificate is valid,
+    # str() gives the human-readable report.
+    cdef cpp.TrustListVerifyResult _result
+    def __bool__(self):
+        return self._result.isValid()
+    def __str__(self):
+        # Fixed: was misspelled `__str` (dead code, never called by str()),
+        # and the C++ std::string must be decoded to return a Python str.
+        return self._result.toString().decode()
+
+cdef class TrustList(object):
+    # Wrapper over dht::crypto::TrustList, a set of trusted certificates.
+    cdef cpp.TrustList _trust
+    def add(self, Certificate cert):
+        self._trust.add(deref(cert._cert.get()))
+    def remove(self, Certificate cert):
+        self._trust.remove(deref(cert._cert.get()))
+    def verify(self, Certificate cert):
+        # Check cert against this trust list; see VerifyResult.
+        r = VerifyResult()
+        r._result = self._trust.verify(deref(cert._cert.get()))
+        return r
+
+cdef class ListenToken(object):
+    # Handle returned by DhtRunner.listen(), required by cancelListen().
+    cdef cpp.InfoHash _h
+    cdef cpp.shared_future[size_t] _t
+    # Per-instance holder for the callback dict kept alive via Py_INCREF.
+    # Fixed: this was previously a class-level `_cb = dict()` shared by
+    # every token, so multiple concurrent listens overwrote each other's
+    # 'cb' entry and cancelListen() could DECREF the wrong object.
+    cdef public dict _cb
+    def __cinit__(self):
+        self._cb = dict()
+
+cdef class Identity(object):
+    # A (private key, certificate) pair, mirroring dht::crypto::Identity.
+    cdef cpp.Identity _id
+    def __init__(self, PrivateKey k = None, Certificate c = None):
+        if k:
+            self._id.first = k._key
+        if c:
+            self._id.second = c._cert
+    @staticmethod
+    def generate(str name = "pydht", Identity ca = Identity(), unsigned bits = 4096):
+        """Generate a new identity (key + certificate), optionally signed by ca."""
+        i = Identity()
+        i._id = cpp.generateIdentity(name.encode(), ca._id, bits)
+        return i
+    property publickey:
+        # Public half of the identity's key.
+        def __get__(self):
+            k = PublicKey()
+            k._key = self._id.first.get().getPublicKey()
+            return k
+    property certificate:
+        def __get__(self):
+            c = Certificate()
+            c._cert = self._id.second
+            return c
+    property key:
+        # Private key of the identity.
+        def __get__(self):
+            k = PrivateKey()
+            k._key = self._id.first
+            return k
+
+cdef class DhtConfig(object):
+    # Configuration passed to DhtRunner.run(); always threaded.
+    cdef cpp.DhtRunnerConfig _config
+    def __init__(self):
+        self._config = cpp.DhtRunnerConfig()
+        self._config.threaded = True;
+    def setIdentity(self, Identity id):
+        # Identity to run the secure node as.
+        self._config.dht_config.id = id._id
+    def setBootstrapMode(self, bool bootstrap):
+        self._config.dht_config.node_config.is_bootstrap = bootstrap
+    def setNodeId(self, InfoHash id):
+        # Force a specific node id instead of a random one.
+        self._config.dht_config.node_config.node_id = id._infohash
+    def setNetwork(self, unsigned netid):
+        # Network id: nodes only interoperate within the same network.
+        self._config.dht_config.node_config.network = netid
+    def setMaintainStorage(self, bool maintain_storage):
+        self._config.dht_config.node_config.maintain_storage = maintain_storage
+
+cdef class DhtRunner(_WithID):
+    """A threaded DHT node: the main API entry point of the wrapper.
+
+    Operations taking a callback are asynchronous; without a callback they
+    block (using a private Condition) until completion.
+    """
+    cdef cpp.shared_ptr[cpp.DhtRunner] thisptr
+    def __cinit__(self):
+        self.thisptr.reset(new cpp.DhtRunner())
+    def getId(self):
+        """Return the node's key hash (a default InfoHash before run())."""
+        h = InfoHash()
+        if self.thisptr:
+            h._infohash = self.thisptr.get().getId()
+        return h
+    def getNodeId(self):
+        """Return the DHT node id as bytes."""
+        return self.thisptr.get().getNodeId().toString()
+    def ping(self, SockAddr addr, done_cb=None):
+        """Ping (bootstrap to) a node address.
+
+        addr    -- the SockAddr of the node to contact
+        done_cb -- if set, makes the call non-blocking; otherwise blocks
+                   and returns whether the ping succeeded.
+        """
+        if done_cb:
+            cb_obj = {'done':done_cb}
+            # Keep the dict alive until the C++ callback fires; the matching
+            # DECREF is in done_callback_simple.
+            ref.Py_INCREF(cb_obj)
+            self.thisptr.get().bootstrap(addr._addr, cpp.bindDoneCbSimple(done_callback_simple, <void*>cb_obj))
+        else:
+            lock = threading.Condition()
+            pending = 0
+            ok = False
+            def tmp_done(ok_ret):
+                nonlocal pending, ok, lock
+                with lock:
+                    ok = ok_ret
+                    pending -= 1
+                    lock.notify()
+            with lock:
+                pending += 1
+                self.ping(addr, done_cb=tmp_done)
+                while pending > 0:
+                    lock.wait()
+            return ok
+    def bootstrap(self, str host, str port=None):
+        """Bootstrap to a host name or address; default port is 4222."""
+        host_bytes = host.encode()
+        port_bytes = port.encode() if port else b'4222'
+        self.thisptr.get().bootstrap(<cpp.const_char*>host_bytes, <cpp.const_char*>port_bytes)
+    def run(self, Identity id=None, is_bootstrap=False, cpp.in_port_t port=0, str ipv4="", str ipv6="", DhtConfig config=None):
+        """Start the node.
+
+        id           -- optional Identity to run a secure node
+        is_bootstrap -- unused here, kept for backward compatibility
+        port         -- UDP port to bind (0 = any)
+        ipv4, ipv6   -- optional explicit bind addresses
+        config       -- optional DhtConfig; a fresh one is created if None
+        """
+        # Fixed: the signature previously used `config=DhtConfig()`, a
+        # mutable default evaluated once at definition time; setIdentity()
+        # below then mutated the shared default, leaking the identity into
+        # every later run() call that relied on the default.
+        if config is None:
+            config = DhtConfig()
+        if id:
+            config.setIdentity(id)
+        if ipv4 or ipv6:
+            bind4 = ipv4.encode() if ipv4 else b''
+            bind6 = ipv6.encode() if ipv6 else b''
+            self.thisptr.get().run(bind4, bind6, str(port).encode(), config._config)
+        else:
+            self.thisptr.get().run(port, config._config)
+    def join(self):
+        """Block until the node's thread terminates."""
+        self.thisptr.get().join()
+    def shutdown(self, shutdown_cb=None):
+        """Gracefully shut the node down, calling shutdown_cb when done."""
+        cb_obj = {'shutdown':shutdown_cb}
+        ref.Py_INCREF(cb_obj)
+        self.thisptr.get().shutdown(cpp.bindShutdownCb(shutdown_callback, <void*>cb_obj))
+    def enableLogging(self):
+        cpp.enableLogging(self.thisptr.get()[0])
+    def disableLogging(self):
+        cpp.disableLogging(self.thisptr.get()[0])
+    def enableFileLogging(self, str path):
+        cpp.enableFileLogging(self.thisptr.get()[0], path.encode())
+    def isRunning(self):
+        return self.thisptr.get().isRunning()
+    def getBound(self, cpp.sa_family_t af = 0):
+        """Return the SockAddr the node is bound to for family af."""
+        s = SockAddr()
+        s._addr = self.thisptr.get().getBound(af)
+        return s
+    def getStorageLog(self):
+        return self.thisptr.get().getStorageLog().decode()
+    def getRoutingTablesLog(self, cpp.sa_family_t af):
+        return self.thisptr.get().getRoutingTablesLog(af).decode()
+    def getSearchesLog(self, cpp.sa_family_t af):
+        return self.thisptr.get().getSearchesLog(af).decode()
+    def getNodeMessageStats(self):
+        stats = []
+        cdef cpp.vector[unsigned] res = self.thisptr.get().getNodeMessageStats(False)
+        for n in res:
+            stats.append(n)
+        return stats
+
+    def get(self, InfoHash key, get_cb=None, done_cb=None, filter=None, Where where=None):
+        """Retrieve values associated with a key on the DHT.
+
+        key     -- the key for which to search
+        get_cb  -- if set, makes the operation non-blocking. Called when a
+                   value is found on the DHT.
+        done_cb -- optional callback used when get_cb is set. Called when
+                   the operation is completed.
+        filter  -- optional Python predicate applied to each value.
+        where   -- optional Where clause filtering values on the remote end.
+
+        Without get_cb, blocks and returns the list of found values.
+        """
+        if get_cb:
+            cb_obj = {'get':get_cb, 'done':done_cb, 'filter':filter}
+            ref.Py_INCREF(cb_obj)
+            if where is None:
+                where = Where()
+            self.thisptr.get().get(key._infohash, cpp.bindGetCb(get_callback, <void*>cb_obj),
+                                   cpp.bindDoneCb(done_callback, <void*>cb_obj),
+                                   cpp.nullptr, #filter implemented in the get_callback
+                                   where._where)
+        else:
+            lock = threading.Condition()
+            pending = 0
+            res = []
+            def tmp_get(v):
+                nonlocal res
+                res.append(v)
+                return True
+            def tmp_done(ok, nodes):
+                nonlocal pending, lock
+                with lock:
+                    pending -= 1
+                    lock.notify()
+            with lock:
+                pending += 1
+                self.get(key, get_cb=tmp_get, done_cb=tmp_done, filter=filter, where=where)
+                while pending > 0:
+                    lock.wait()
+            return res
+    def put(self, InfoHash key, Value val, done_cb=None):
+        """Publish a new value on the DHT at key.
+
+        key     -- the DHT key where to put the value
+        val     -- the value to put on the DHT
+        done_cb -- optional callback called when the operation is completed.
+
+        Without done_cb, blocks and returns whether the put succeeded.
+        """
+        if done_cb:
+            cb_obj = {'done':done_cb}
+            ref.Py_INCREF(cb_obj)
+            self.thisptr.get().put(key._infohash, val._value, cpp.bindDoneCb(done_callback, <void*>cb_obj))
+        else:
+            lock = threading.Condition()
+            pending = 0
+            ok = False
+            def tmp_done(ok_ret, nodes):
+                nonlocal pending, ok, lock
+                with lock:
+                    ok = ok_ret
+                    pending -= 1
+                    lock.notify()
+            with lock:
+                pending += 1
+                self.put(key, val, done_cb=tmp_done)
+                while pending > 0:
+                    lock.wait()
+            return ok
+    def listen(self, InfoHash key, get_cb):
+        """Subscribe to values at key; get_cb is called for each value.
+
+        Returns a ListenToken to pass to cancelListen().
+        """
+        t = ListenToken()
+        t._h = key._infohash
+        cb_obj = {'get':get_cb}
+        t._cb['cb'] = cb_obj
+        # avoid the callback being destructed if the token is destroyed
+        ref.Py_INCREF(cb_obj)
+        t._t = self.thisptr.get().listen(t._h, cpp.bindGetCb(get_callback, <void*>cb_obj)).share()
+        return t
+    def cancelListen(self, ListenToken token):
+        """Stop a listen operation and release its callback reference."""
+        self.thisptr.get().cancelListen(token._h, token._t)
+        ref.Py_DECREF(<object>token._cb['cb'])
+        # fixme: not thread safe
+
+cdef class IndexValue(object):
+    # A (key hash, value id) pair stored in a Pht index.
+    cdef cpp.shared_ptr[cpp.IndexValue] _value
+    def __init__(self, InfoHash h=None, cpp.uint64_t vid=0):
+        # Fixed: the default h=None previously crashed on `h._infohash`;
+        # a None hash now yields a default-constructed (zero) InfoHash.
+        cdef cpp.InfoHash hh
+        if h is not None:
+            hh = h._infohash
+        self._value.reset(new cpp.IndexValue(hh, vid))
+    def __str__(self):
+        return "(" + self.getKey().toString().decode() +", "+ str(self.getValueId()) +")"
+    def getKey(self):
+        """Return the indexed key hash as an InfoHash."""
+        h = InfoHash()
+        h._infohash = self._value.get().first
+        return h
+    def getValueId(self):
+        """Return the 64-bit id of the indexed value."""
+        return self._value.get().second
+
+cdef class Pht(object):
+    # Prefix Hash Tree index over the DHT.
+    # NOTE(review): thisptr is a raw owning pointer with no __dealloc__,
+    # so the C++ Pht appears to be leaked when the wrapper dies -- confirm.
+    cdef cpp.Pht* thisptr
+    def __cinit__(self, bytes name, key_spec, DhtRunner dht):
+        # key_spec maps field name (str) to its size in bits/bytes as
+        # expected by dht::indexation::Pht::KeySpec.
+        cdef cpp.IndexKeySpec cpp_key_spec
+        for kk, size in key_spec.items():
+            cpp_key_spec[bytes(kk, 'utf-8')] = size
+        self.thisptr = new cpp.Pht(name, cpp_key_spec, dht.thisptr)
+    property MAX_NODE_ENTRY_COUNT:
+        # Maximum number of entries per PHT node (C++ constant).
+        def __get__(self):
+            return cpp.PHT_MAX_NODE_ENTRY_COUNT
+    def lookup(self, key, lookup_cb=None, done_cb=None):
+        """Query the Index with a specified key.
+
+        key       -- the key for to the entry in the index.
+        lookup_cb -- function called when the operation is completed. This
+                     function takes a list of IndexValue objects and a string
+                     representation of the prefix where the value was indexed
+                     in the PHT.
+        """
+        cb_obj = {'lookup':lookup_cb, 'done':done_cb} # TODO: donecallback is to be removed
+        # Reference released by done_callback_simple when the op completes.
+        ref.Py_INCREF(cb_obj)
+        cdef cpp.IndexKey cppk
+        for kk, v in key.items():
+            cppk[bytes(kk, 'utf-8')] = bytes(v)
+        self.thisptr.lookup(
+            cppk,
+            cpp.Pht.bindLookupCb(lookup_callback, <void*>cb_obj),
+            cpp.bindDoneCbSimple(done_callback_simple, <void*>cb_obj)
+        )
+    def insert(self, key, IndexValue value, done_cb=None):
+        """Add an index entry to the Index.
+
+        key     -- the key for to the entry in the index.
+        value   -- an IndexValue object describing the indexed value.
+        done_cb -- Called when the operation is completed.
+        """
+        cb_obj = {'done':done_cb}
+        ref.Py_INCREF(cb_obj)
+        cdef cpp.IndexKey cppk
+        for kk, v in key.items():
+            cppk[bytes(kk, 'utf-8')] = bytes(v)
+        cdef cpp.IndexValue val
+        val.first = (<InfoHash>value.getKey())._infohash
+        val.second = value.getValueId()
+        self.thisptr.insert(
+            cppk,
+            val,
+            cpp.bindDoneCbSimple(done_callback_simple, <void*>cb_obj)
+        )
--- /dev/null
+# Copyright (c) 2015-2016 Savoir-faire Linux Inc.
+# Author(s): Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+# Simon Désaulniers <sim.desaulniers@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <https://www.gnu.org/licenses/>.
+
+from libc.stdint cimport *
+from libcpp cimport bool, nullptr_t, nullptr
+from libcpp.string cimport string
+from libcpp.vector cimport vector
+from libcpp.utility cimport pair
+from libcpp.map cimport map
+from libc.string cimport const_char, const_uchar
+
+# C-level declarations mirroring the OpenDHT C++ headers.  Only the members
+# actually used by opendht.pyx are declared here.
+ctypedef uint16_t in_port_t
+ctypedef unsigned short int sa_family_t;
+
+# Minimal std::shared_ptr declaration (subset of the real interface).
+cdef extern from "<memory>" namespace "std" nogil:
+    cdef cppclass shared_ptr[T]:
+        shared_ptr() except +
+        shared_ptr(T*) except +
+        T* get()
+        T operator*()
+        bool operator bool() const
+        void reset(T*)
+    shared_ptr[T] make_shared[T](...) except +
+
+cdef extern from "<functional>" namespace "std" nogil:
+    cdef cppclass hash[T]:
+        hash() except +
+        size_t get "operator()"(T)
+
+# std::future / std::shared_future, used for listen tokens.
+cdef extern from "<future>" namespace "std" nogil:
+    cdef cppclass shared_future[T]:
+        shared_future() except +
+        bool valid() const
+
+    cdef cppclass future[T]:
+        future() except +
+        bool valid() const
+        shared_future[T] share()
+
+cdef extern from "opendht/infohash.h" namespace "dht":
+    cdef cppclass InfoHash:
+        InfoHash() except +
+        InfoHash(string s) except +
+        string toString() const
+        bool getBit(unsigned bit) const
+        void setBit(unsigned bit, bool b)
+        double toFloat() const
+        @staticmethod
+        unsigned commonBits(InfoHash a, InfoHash b)
+        @staticmethod
+        InfoHash get(string s)
+        @staticmethod
+        InfoHash getRandom()
+        bool operator==(InfoHash) const
+        bool operator<(InfoHash) const
+        bool operator bool() const
+
+cdef extern from "opendht/sockaddr.h" namespace "dht":
+    cdef cppclass SockAddr:
+        SockAddr() except +
+        string toString() const
+        in_port_t getPort() const
+        void setPort(in_port_t p)
+        sa_family_t getFamily() const
+        void setFamily(sa_family_t f)
+        bool isLoopback() const
+        bool isPrivate() const
+        bool isUnspecified() const
+
+# dht::Blob is a std::vector<uint8_t>.
+ctypedef vector[uint8_t] Blob
+
+cdef extern from "opendht/crypto.h" namespace "dht::crypto":
+    # Identity is a (private key, certificate) pair.
+    ctypedef pair[shared_ptr[PrivateKey], shared_ptr[Certificate]] Identity
+    cdef Identity generateIdentity(string name, Identity ca, unsigned bits)
+
+    cdef cppclass PrivateKey:
+        PrivateKey()
+        PublicKey getPublicKey() const
+        Blob decrypt(Blob data) const
+        @staticmethod
+        PrivateKey generate()
+        @staticmethod
+        PrivateKey generateEC()
+
+    cdef cppclass PublicKey:
+        PublicKey()
+        InfoHash getId() const
+        Blob encrypt(Blob data) const
+
+    cdef cppclass Certificate:
+        Certificate()
+        Certificate(string pem)
+        InfoHash getId() const
+        string toString() const
+        string getName() const
+        void revoke(PrivateKey key, Certificate cert)
+        @staticmethod
+        Certificate generate(PrivateKey key, string name, Identity ca, bool is_ca)
+        shared_ptr[Certificate] issuer
+
+    cdef cppclass TrustList:
+        cppclass VerifyResult:
+            bool operator bool() const
+            bool isValid() const
+            string toString() const
+        TrustList()
+        void add(Certificate)
+        void remove(Certificate)
+        VerifyResult verify(Certificate);
+
+# Alias for the nested VerifyResult type (Cython can't spell it inline).
+ctypedef TrustList.VerifyResult TrustListVerifyResult
+
+cdef extern from "opendht/value.h" namespace "dht::Value":
+    cdef cppclass Field:
+        pass
+
+cdef extern from "opendht/value.h" namespace "dht::Value::Field":
+    cdef Field None
+    cdef Field Id
+    cdef Field ValueType
+    cdef Field OwnerPk
+    cdef Field SeqNum
+    cdef Field UserType
+    cdef Field COUNT
+
+cdef extern from "opendht/value.h" namespace "dht":
+    cdef cppclass Value:
+        Value() except +
+        Value(vector[uint8_t]) except +
+        Value(const uint8_t* dat_ptr, size_t dat_len) except +
+        string toString() const
+        size_t size() const
+        uint64_t id
+        shared_ptr[PublicKey] owner
+        InfoHash recipient
+        vector[uint8_t] data
+        string user_type
+
+    cdef cppclass Query:
+        Query() except +
+        Query(Select s, Where w) except +
+        Query(string q_str) except +
+        bool isSatisfiedBy(const Query& q) const
+        string toString() const
+
+    cdef cppclass Select:
+        Select() except +
+        Select(const string& q_str) except +
+        bool isSatisfiedBy(const Select& os) const
+        Select& field(Field field)
+        string toString() const
+
+    cdef cppclass Where:
+        Where() except +
+        Where(const string& q_str)
+        bool isSatisfiedBy(const Where& where) const
+        Where& id(uint64_t id)
+        Where& valueType(uint16_t type)
+        Where& owner(InfoHash owner_pk_hash)
+        Where& seq(uint16_t seq_no)
+        Where& userType(string user_type)
+        string toString() const
+
+cdef extern from "opendht/node.h" namespace "dht":
+    cdef cppclass Node:
+        Node() except +
+        InfoHash getId() const
+        string getAddrStr() const
+        bool isExpired() const
+
+# Raw C callback typedefs plus the bind* helpers turning a (function,
+# user_data) pair into the std::function objects OpenDHT expects.
+cdef extern from "opendht/callbacks.h" namespace "dht":
+    ctypedef void (*ShutdownCallbackRaw)(void *user_data)
+    ctypedef bool (*GetCallbackRaw)(shared_ptr[Value] values, void *user_data)
+    ctypedef void (*DoneCallbackRaw)(bool done, vector[shared_ptr[Node]]* nodes, void *user_data)
+    ctypedef void (*DoneCallbackSimpleRaw)(bool done, void *user_data)
+
+    cppclass ShutdownCallback:
+        ShutdownCallback() except +
+    cppclass GetCallback:
+        GetCallback() except +
+    cppclass DoneCallback:
+        DoneCallback() except +
+    cppclass DoneCallbackSimple:
+        DoneCallbackSimple() except +
+
+    cdef ShutdownCallback bindShutdownCb(ShutdownCallbackRaw cb, void *user_data)
+    cdef GetCallback bindGetCb(GetCallbackRaw cb, void *user_data)
+    cdef DoneCallback bindDoneCb(DoneCallbackRaw cb, void *user_data)
+    cdef DoneCallbackSimple bindDoneCbSimple(DoneCallbackSimpleRaw cb, void *user_data)
+
+    cppclass Config:
+        InfoHash node_id
+        uint32_t network
+        bool is_bootstrap
+        bool maintain_storage
+    cppclass SecureDhtConfig:
+        Config node_config
+        Identity id
+
+cdef extern from "opendht/dhtrunner.h" namespace "dht":
+    ctypedef future[size_t] ListenToken
+    ctypedef shared_future[size_t] SharedListenToken
+    cdef cppclass DhtRunner:
+        DhtRunner() except +
+        cppclass Config:
+            SecureDhtConfig dht_config
+            bool threaded
+        InfoHash getId() const
+        InfoHash getNodeId() const
+        void bootstrap(const_char*, const_char*)
+        void bootstrap(const SockAddr&, DoneCallbackSimple done_cb)
+        void run(in_port_t, Config config)
+        void run(const_char*, const_char*, const_char*, Config config)
+        void join()
+        void shutdown(ShutdownCallback)
+        bool isRunning()
+        SockAddr getBound(sa_family_t af) const
+        string getStorageLog() const
+        string getRoutingTablesLog(sa_family_t af) const
+        string getSearchesLog(sa_family_t af) const
+        void get(InfoHash key, GetCallback get_cb, DoneCallback done_cb, nullptr_t f, Where w)
+        void put(InfoHash key, shared_ptr[Value] val, DoneCallback done_cb)
+        ListenToken listen(InfoHash key, GetCallback get_cb)
+        void cancelListen(InfoHash key, SharedListenToken token)
+        vector[unsigned] getNodeMessageStats(bool i)
+
+ctypedef DhtRunner.Config DhtRunnerConfig
+
+cdef extern from "opendht/log.h" namespace "dht::log":
+    void enableLogging(DhtRunner& dht)
+    void disableLogging(DhtRunner& dht)
+    void enableFileLogging(DhtRunner& dht, const string& path)
+
+cdef extern from "opendht/indexation/pht.h" namespace "dht::indexation":
+    size_t PHT_MAX_NODE_ENTRY_COUNT "dht::indexation::Pht::MAX_NODE_ENTRY_COUNT"
+    cdef cppclass Prefix:
+        Prefix() except +
+        Prefix(vector[uint8_t]) except +
+        string toString() const
+    ctypedef pair[InfoHash, uint64_t] IndexValue "dht::indexation::Value"
+    ctypedef map[string, vector[uint8_t]] IndexKey "dht::indexation::Pht::Key"
+    ctypedef map[string, uint32_t] IndexKeySpec "dht::indexation::Pht::KeySpec"
+    ctypedef void (*LookupCallbackRaw)(vector[shared_ptr[IndexValue]]* values, Prefix* p, void* user_data);
+    cdef cppclass Pht:
+        cppclass LookupCallback:
+            LookupCallback() except +
+        Pht(string, IndexKeySpec, shared_ptr[DhtRunner]) except +
+        void lookup(IndexKey k, LookupCallback cb, DoneCallbackSimple doneCb);
+        void insert(IndexKey k, IndexValue v, DoneCallbackSimple cb)
+        @staticmethod
+        LookupCallback bindLookupCb(LookupCallbackRaw cb, void *user_data)
--- /dev/null
+# Copyright (C) 2015-2017 Savoir-faire Linux Inc.
+# Author: Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
+# Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+#
+# A Python3 wrapper to access to OpenDHT API
+# This wrapper is written for Cython 0.22
+#
+# This file is part of OpenDHT Python Wrapper.
+#
+# OpenDHT Python Wrapper is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OpenDHT Python Wrapper is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OpenDHT Python Wrapper. If not, see <https://www.gnu.org/licenses/>.
+#
+
from setuptools import setup, Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext

# Build/installation recipe for the OpenDHT Python wrapper.
# The @VAR@ placeholders are substituted by the build system (configure_file).
setup(name="opendht",
      version="@PACKAGE_VERSION@",
      description="Python wrapper for OpenDHT",
      url='https://github.com/savoirfairelinux/opendht',
      author="Adrien Béraud, Guillaume Roguez, Simon Désaulniers",
      license="GPLv3",
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: System :: Distributed Computing',
          # Bug fix: a missing trailing comma here made Python concatenate
          # this string with the License classifier into one invalid entry.
          'Topic :: System :: Networking',
          'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
      ],
      cmdclass={'build_ext': build_ext},
      ext_modules=cythonize(Extension(
          "opendht",
          ["@CURRENT_SOURCE_DIR@/opendht.pyx"],
          include_dirs=['@PROJECT_SOURCE_DIR@/include'],
          language="c++",
          extra_compile_args=["-std=c++11"],
          extra_link_args=["-std=c++11"],
          libraries=["opendht"],
          library_dirs=['@CURRENT_BINARY_DIR@', '@PROJECT_BINARY_DIR@']
      ))
)
--- /dev/null
+import unittest
+import opendht as dht
+
class OpenDhtTester(unittest.TestCase):
    """Basic sanity tests for the OpenDHT Python wrapper."""

    def test_instance(self):
        """DhtRunner can be created, started and destroyed repeatedly without throwing."""
        for _ in range(10):
            runner = dht.DhtRunner()
            runner.run()
            del runner

    def test_bootstrap(self):
        """A node can reach another one through its raw bound address."""
        first = dht.DhtRunner()
        first.run()
        second = dht.DhtRunner()
        second.run()
        self.assertTrue(second.ping(first.getBound()))

    def test_crypto(self):
        """A message encrypted with an identity's public key decrypts back unchanged."""
        identity = dht.Identity.generate("id")
        plain = dht.InfoHash.getRandom().toString()
        ciphered = identity.publickey.encrypt(plain)
        recovered = identity.key.decrypt(ciphered)
        self.assertTrue(plain == recovered)

    def test_crypto_ec(self):
        """An EC key can issue a CA certificate that signs a verifiable child cert."""
        ca_key = dht.PrivateKey.generateEC()
        ca_cert = dht.Certificate.generate(ca_key, "CA", is_ca=True)
        ca_identity = dht.Identity(ca_key, ca_cert)
        self.assertTrue(ca_cert.getId() == ca_key.getPublicKey().getId())
        child_key = dht.PrivateKey.generateEC()
        child_cert = dht.Certificate.generate(child_key, "cert", ca_identity)
        trust = dht.TrustList()
        trust.add(ca_cert)
        self.assertTrue(trust.verify(child_cert))

    def test_trust(self):
        """Trust follows the certificate chain and honors revocations."""
        main_id = dht.Identity.generate("id_1")
        sub_id1 = dht.Identity.generate("sid_1", main_id)
        sub_id2 = dht.Identity.generate("sid_2", main_id)
        main_id.certificate.revoke(main_id.key, sub_id2.certificate)
        main_id2 = dht.Identity.generate("id_2")
        trust = dht.TrustList()
        trust.add(main_id.certificate)
        self.assertTrue(trust.verify(main_id.certificate))
        self.assertTrue(trust.verify(sub_id1.certificate))
        self.assertFalse(trust.verify(sub_id2.certificate))
        self.assertFalse(trust.verify(main_id2.certificate))
+
if __name__ == '__main__':
    # Discover and run all test cases in this module.
    unittest.main()
--- /dev/null
+# Benchmark
+
+The `benchmark.py` script is used for testing OpenDHT in various cases. If you
+run `benchmark.py --help`, you should find the following text:
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --performance Launches performance benchmark test. Available args
+ for "-t" are: gets.
+ --data-persistence Launches data persistence benchmark test. Available
+ args for "-t" are: delete, replace, mult_time.
+                        Available args for "-o" are: dump_str_log,
+                        keep_alive, trigger, traffic_plot, op_plot. Use "-m"
+                        to specify the number of producers on the DHT. Use "-e"
+ to specify the number of values to put on the DHT.
+
+These options specify the feature to be tested. Each feature has its own tests.
+You specify the test by using `-t` flag (see `benchmark.py --help` for full
+help).
+
+## Python dependencies
+
+- pyroute2 >=0.3.14
+- matplotlib
+- GeoIP (used by `scanner.py` for drawing map of the world)
+- ipaddress
+- netifaces
+- networkx
+- numpy
+
+## Usage
+
+Before running the script, you have to build and install OpenDHT and its cython
+wrapper (`cython3` has to be installed) on the system so that it can be found by
+the benchmark script.
+
+ $ cd $OPENDHT_SRC_DIR
+ $ ./autogen.sh
+ $ ./configure
+ $ make && sudo make install
+
+Then, you can use the script like so:
+
+ $ cd $OPENDHT_SRC_DIR/python/tools/
+ $ python3 benchmark.py --performance -t gets -n 2048
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (C) 2015-2016 Savoir-faire Linux Inc.
+# Author(s): Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+# Simon Désaulniers <sim.desaulniers@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <http://www.gnu.org/licenses/>.
+
import argparse
import os
import random
import signal
import subprocess
import sys
import threading  # added: used by WorkBench.start_cluster but was never imported
import time

from dht.network import DhtNetwork
from dht.network import DhtNetworkSubProcess
from dht.tests import PerformanceTest, PersistenceTest, PhtTest
from dht import virtual_network_builder
from dht import network as dhtnetwork

from opendht import *
+
+
class WorkBench():
    """
    Holds the initialisation information (ipv4/ipv6, number of nodes and
    clusters to create, simulated loss/delay, ...) and drives the setup
    and teardown of the virtual network and its node clusters.
    """
    def __init__(self, ifname='ethdht', virtual_locs=8, node_num=32, remote_bootstrap=None, loss=0, delay=0, disable_ipv4=False,
                 disable_ipv6=False):
        self.ifname = ifname
        self.virtual_locs = virtual_locs
        self.node_num = node_num
        # One cluster per virtual location, but never more clusters than nodes.
        self.clusters = min(virtual_locs, node_num)
        self.node_per_loc = int(self.node_num / self.clusters)
        self.loss = loss
        self.delay = delay
        self.disable_ipv4 = disable_ipv4
        self.disable_ipv6 = disable_ipv6

        self.remote_bootstrap = remote_bootstrap
        self.local_bootstrap = None
        self.bs_port = "5000"
        # One sub-process slot per cluster; None until the cluster is started.
        self.procs = [None for _ in range(self.clusters)]

    def get_bootstrap(self):
        """Lazily create and return the local bootstrap DhtNetwork."""
        if not self.local_bootstrap:
            self.local_bootstrap = DhtNetwork(iface='br'+self.ifname,
                    first_bootstrap=False if self.remote_bootstrap else True,
                    bootstrap=[(self.remote_bootstrap, self.bs_port)] if self.remote_bootstrap else [])
        return self.local_bootstrap

    def create_virtual_net(self):
        """Build the virtual network topology through virtual_network_builder."""
        if self.virtual_locs > 1:
            cmd = ["python3", os.path.abspath(virtual_network_builder.__file__),
                   "-i", self.ifname,
                   "-n", str(self.clusters),
                   '-l', str(self.loss),
                   '-d', str(self.delay)]
            if not self.disable_ipv4:
                cmd.append('-4')
            if not self.disable_ipv6:
                cmd.append('-6')
            print(cmd)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            output, err = p.communicate()
            print(output.decode())

    def destroy_virtual_net(self):
        """Tear the virtual network down (reverse of create_virtual_net)."""
        # Bug fix: message typo ("Shuting" -> "Shutting").
        print('Shutting down the virtual IP network.')
        subprocess.call(["python3", os.path.abspath(virtual_network_builder.__file__), "-i", self.ifname, "-n", str(self.clusters), "-r"])

    def start_cluster(self, i):
        """Spawn cluster ``i`` as a DhtNetworkSubProcess and wait for its first ping."""
        if self.local_bootstrap:
            cmd = ["python3", os.path.abspath(dhtnetwork.__file__), "-n", str(self.node_per_loc), '-I', self.ifname+str(i)+'.1']
            if self.remote_bootstrap:
                cmd.extend(['-b', self.remote_bootstrap, '-bp', "5000"])
            else:
                if not self.disable_ipv4 and self.local_bootstrap.ip4:
                    cmd.extend(['-b', self.local_bootstrap.ip4])
                if not self.disable_ipv6 and self.local_bootstrap.ip6:
                    cmd.extend(['-b6', self.local_bootstrap.ip6])
            # 'threading' is imported at module level (it was previously
            # missing, which made this method raise NameError).
            lock = threading.Condition()
            def dcb(success):
                nonlocal lock
                if not success:
                    DhtNetwork.Log.err("Failed to initialize network...")
                with lock:
                    lock.notify()
            with lock:
                self.procs[i] = DhtNetworkSubProcess('node'+str(i), cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                self.procs[i].sendPing(done_cb=dcb)
                lock.wait()
        else:
            raise Exception('First create bootstrap.')

    def stop_cluster(self, i):
        """
        Stops a cluster sub process. All nodes are put down without graceful
        shutdown.
        """
        if self.procs[i]:
            try:
                self.procs[i].quit()
            except Exception as e:
                print(e)
            self.procs[i] = None

    def replace_cluster(self):
        """
        Same as stop_cluster(), but creates a new cluster right after.
        """
        n = random.randrange(0, self.clusters)
        self.stop_cluster(n)
        self.start_cluster(n)

    def resize_clusters(self, n):
        """
        Resizes the list of clusters to be of length ``n``.
        """
        procs_count = len(self.procs)
        if procs_count < n:
            # Grow: allocate a slot, then start the new cluster in it.
            for i in range(n-procs_count):
                self.procs.append(None)
                self.start_cluster(procs_count+i)
        else:
            # Shrink: stop clusters from the end of the list.
            for i in range(procs_count-n):
                self.stop_cluster(procs_count-i-1)
+
+
if __name__ == '__main__':
    # Command-line entry point: build the virtual network, run the selected
    # benchmark feature, then tear everything down.
    parser = argparse.ArgumentParser(description='Run, test and benchmark a '\
            'DHT network on a local virtual network with simulated packet '\
            'loss and latency.')
    ifConfArgs = parser.add_argument_group('Virtual interface configuration')
    ifConfArgs.add_argument('-i', '--ifname', default='ethdht', help='interface name')
    ifConfArgs.add_argument('-n', '--node-num', type=int, default=32, help='number of dht nodes to run')
    ifConfArgs.add_argument('-v', '--virtual-locs', type=int, default=8,
            help='number of virtual locations (node clusters)')
    ifConfArgs.add_argument('-l', '--loss', type=int, default=0, help='simulated cluster packet loss (percent)')
    ifConfArgs.add_argument('-d', '--delay', type=int, default=0, help='simulated cluster latency (ms)')
    # NOTE(review): args.bootstrap is parsed but never forwarded to
    # WorkBench(remote_bootstrap=...) below — verify whether this is intended.
    ifConfArgs.add_argument('-b', '--bootstrap', default=None, help='Bootstrap node to use (if any)')
    # NOTE(review): these help strings read inverted — the flags *disable*
    # IPv4/IPv6; confirm before changing the user-facing text.
    ifConfArgs.add_argument('-no4', '--disable-ipv4', action="store_true", help='Enable IPv4')
    ifConfArgs.add_argument('-no6', '--disable-ipv6', action="store_true", help='Enable IPv6')

    testArgs = parser.add_argument_group('Test arguments')
    testArgs.add_argument('--bs-dht-log', action='store_true', default=False, help='Enables dht log in bootstrap.')
    testArgs.add_argument('-t', '--test', type=str, default=None, required=True, help='Specifies the test.')
    testArgs.add_argument('-o', '--opt', type=str, default=[], nargs='+',
            help='Options passed to tests routines.')
    testArgs.add_argument('-m', type=int, default=None, help='Generic size option passed to tests.')
    testArgs.add_argument('-e', type=int, default=None, help='Generic size option passed to tests.')

    # Exactly one feature must be selected.
    featureArgs = parser.add_mutually_exclusive_group(required=True)
    featureArgs.add_argument('--performance', action='store_true', default=False,
            help='Launches performance benchmark test. Available args for "-t" are: gets.')
    # NOTE(review): missing space/punctuation between the "-o" sentence and
    # the 'Use "-m"' sentence in this help string — confirm and fix upstream.
    featureArgs.add_argument('--pht', action='store_true', default=False,
            help='Launches PHT benchmark test. '\
                    'Available args for "-t" are: insert. '\
                    'Timer available by adding "timer" to "-o" args'\
                    'Use "-m" option for fixing number of keys to create during the test.')
    featureArgs.add_argument('--data-persistence', action='store_true', default=0,
            help='Launches data persistence benchmark test. '\
                    'Available args for "-t" are: delete, replace, mult_time. '\
                    'Available args for "-o" are : dump_str_log, keep_alive, trigger, traffic_plot, op_plot. '\
                    'Use "-m" to specify the number of producers on the DHT. '\
                    'Use "-e" to specify the number of values to put on the DHT.')

    args = parser.parse_args()
    # Each "-o" token becomes a boolean flag for the test routines.
    test_opt = { o : True for o in args.opt }

    wb = WorkBench(args.ifname, args.virtual_locs, args.node_num, loss=args.loss,
            delay=args.delay, disable_ipv4=args.disable_ipv4,
            disable_ipv6=args.disable_ipv6)
    wb.create_virtual_net()
    bootstrap = wb.get_bootstrap()

    # SIGUSR1 toggles DHT logging on the bootstrap node at runtime.
    bs_dht_log_enabled = False
    def toggle_bs_dht_log(signum, frame):
        global bs_dht_log_enabled, bootstrap
        if bs_dht_log_enabled:
            bootstrap.front().disableLogging()
            bs_dht_log_enabled = False
        else:
            bootstrap.front().enableLogging()
            bs_dht_log_enabled = True
    signal.signal(signal.SIGUSR1, toggle_bs_dht_log)

    if args.bs_dht_log:
        bs_dht_log_enabled = True
        bootstrap.front().enableLogging()

    # Single bootstrap node; clusters run in their own sub-processes.
    bootstrap.resize(1)
    print("Launching", wb.node_num, "nodes (", wb.clusters, "clusters of", wb.node_per_loc, "nodes)")

    try:
        for i in range(wb.clusters):
            wb.start_cluster(i)

        # recover -e and -m values.
        if args.e:
            test_opt.update({ 'num_values' : args.e })
        if args.m:
            test_opt.update({ 'num_producers' : args.m })

        # run the test
        if args.performance:
            PerformanceTest(args.test, wb, test_opt).run()
        elif args.data_persistence:
            PersistenceTest(args.test, wb, test_opt).run()
        elif args.pht:
            if args.m:
                test_opt.update({ 'num_keys' : args.m })
            PhtTest(args.test, wb, test_opt).run()

    except Exception as e:
        print(e)
    finally:
        # Always tear down clusters, bootstrap and the virtual network.
        for p in wb.procs:
            if p:
                p.quit()
        bootstrap.resize(0)
        sys.stdout.write('Shutting down the virtual IP network... ')
        sys.stdout.flush()
        wb.destroy_virtual_net()
        print('Done.')
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2016 Savoir-faire Linux Inc.
+# Author(s): Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+# Simon Désaulniers <sim.desaulniers@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import signal
+import random
+import time
+import threading
+import queue
+import re
+import traceback
+
+import ipaddress
+import netifaces
+import numpy as np
+from pyroute2.netns.process.proxy import NSPopen
+import msgpack
+
+from opendht import *
+
+
# useful functions
def b_space_join(*parts):
    """Join the given arguments (each coerced with bytes()) with a b' ' separator.

    Replaces the previous lambda assignment (PEP 8 E731) with a proper def.
    """
    return b' '.join(map(bytes, parts))
+
class DhtNetworkSubProcess(NSPopen):
    """
    Handles communication with a DhtNetwork sub-process spawned in its own
    network namespace.

    When instantiated, the object's I/O thread is started; it writes queued
    msgpack requests to the sub-process' stdin and reads msgpack answers
    from its stdout.
    """
    # Packet type keys marking the kind of a msgpack packet.
    REQUEST = 'DhtNetworkSubProcess.request'
    ANSWER = 'DhtNetworkSubProcess.answer'
    OUT = 'DhtNetworkSubProcess.out'

    # Request codes understood by the sub-process.
    PING_REQ = "p"
    NODE_PUT_REQ = "np"                 # "np <hash> <value>"
    NEW_NODE_REQ = "nn"                 # "nn"
    REMOVE_NODE_REQ = "rn"              # "rn <id0>[ <id1>[ id2[...]]]"
    SHUTDOWN_NODE_REQ = "sdn"           # "sdn <id0>[ <id1>[ id2[...]]]"
    SHUTDOWN_REPLACE_NODE_REQ = "sdrn"  # "sdrn <id0>[ <id1>[ id2[...]]]"
    SHUTDOWN_CLUSTER_REQ = "sdc"        # "sdc"
    DUMP_STORAGE_REQ = "strl"           # "strl"
    MESSAGE_STATS = "gms"               # "gms"

    def __init__(self, ns, cmd, quit=False, **kwargs):
        """
        @param ns: Name of the network namespace the command is spawned in.
        @param cmd: Command line (list of strings) to run.
        @param quit: When True, the communication thread exits immediately.
        """
        super(DhtNetworkSubProcess, self).__init__(ns, cmd, **kwargs)
        self._setStdoutFlags()
        self._virtual_ns = ns

        self._quit = quit
        self._lock = threading.Condition()
        self._in_queue = queue.Queue()  # outgoing packets, drained by the I/O thread
        self._callbacks = {}            # transaction id -> answer callback
        self._tid = 0                   # transaction id counter

        # starting thread
        self._thread = threading.Thread(target=self._communicate)
        self._thread.start()

    def __repr__(self):
        return 'DhtNetwork on virtual namespace "%s"' % self._virtual_ns

    def _setStdoutFlags(self):
        """
        Sets non-blocking read flags for subprocess stdout file descriptor.
        """
        import fcntl
        flags = self.stdout.fcntl(fcntl.F_GETFL)
        self.stdout.fcntl(fcntl.F_SETFL, flags | os.O_NDELAY)

    def _communicate(self):
        """
        Communication thread body. Writes queued requests to the sub-process
        and dispatches msgpack packets read from its stdout.
        """
        sleep_time = 0.1

        while not self._quit:
            with self._lock:
                try:
                    packet = self._in_queue.get_nowait()

                    # sending data to sub process
                    self.stdin.write(packet)
                    self.stdin.flush()
                except queue.Empty:
                    pass

                # reading from sub process (stdout is non-blocking)
                out_string = ''
                for p in msgpack.Unpacker(self.stdout):
                    if isinstance(p, dict):
                        self._process_packet(p)
                    else:
                        # Some non-msgpack data could slip into the stream. We
                        # have to treat those as characters.
                        out_string += chr(p)
                if out_string:
                    print(out_string)

                # waiting for next stdin req to send
                self._lock.wait(timeout=sleep_time)

        # Wake up whoever is blocked in _stop_communicating().
        with self._lock:
            self._lock.notify()

    def _stop_communicating(self):
        """
        Stops the I/O thread from communicating with the subprocess.
        """
        if not self._quit:
            self._quit = True
            with self._lock:
                self._lock.notify()
                self._lock.wait()

    def quit(self):
        """
        Notifies thread and sub process to terminate. This is blocking call
        until the sub process finishes.
        """
        self._stop_communicating()
        self.send_signal(signal.SIGINT)
        self.wait()
        self.release()

    def _send(self, msg):
        """
        Queue serialized data for the I/O thread to write to the sub-process.
        """
        with self._lock:
            self._in_queue.put(msg)
            self._lock.notify()

    def _process_packet(self, p):
        """
        Dispatch a msgpack packet received from the sub-process to the
        callback registered under its transaction id.
        """
        if not b'tid' in p:
            DhtNetwork.Log.err('Bad packet...')
            # Robustness fix: a packet without a transaction id cannot be
            # dispatched; previously execution fell through to a KeyError.
            return
        try:
            self._callbacks[p[b'tid']](p)
        except KeyError:
            DhtNetwork.Log.err('Unknown tid...')

    def _sendRequest(self, request, tid, done_cb):
        """
        Sends a request to the sub network and registers the answer callback.

        @param request: The serialized request.
        @type request: Msgpack object
        @param tid: Transaction id identifying the matching answer.
        @param done_cb: Callback invoked with the answer packet.
        """
        self._callbacks[tid] = done_cb
        self._send(request)

    def sendPing(self, done_cb=None):
        """Sends a ping request to the DhtNetworkSubProcess.

        @param done_cb: The callback to be executed when we get a response. This
                        function takes a boolean "success" as parameter.
        @type done_cb: Function
        """
        self._tid += 1
        def dcb(packet):
            # Consistency fix: like the other send* methods, tolerate a
            # missing callback instead of raising TypeError on None.
            if not done_cb:
                return
            try:
                done_cb(packet[b'success'])
            except KeyError:
                done_cb(False)
        self._sendRequest(msgpack.packb({
            DhtNetworkSubProcess.REQUEST : True,
            'tid' : self._tid,
            'req' : DhtNetworkSubProcess.PING_REQ
        }), self._tid, dcb)

    def sendGetMessageStats(self, done_cb=None):
        """
        Sends DhtNetwork sub process statistics request about nodes messages
        sent.

        @param done_cb: A function taking as parameter the returned list of
                        stats.
        @type done_cb: function

        @return: A list [num_nodes, ping, find, get, put, listen].
        @rtype : list
        """
        self._tid += 1
        def dcb(packet):
            nonlocal done_cb
            if not done_cb:
                return
            try:
                stats = packet[b'stats']
                # Bug fix: the callback used to be invoked twice (once inside
                # its own argument list); forward the stats exactly once.
                done_cb(stats if isinstance(stats, list) else [])
            except KeyError:
                done_cb([])
        self._sendRequest(msgpack.packb({
            DhtNetworkSubProcess.REQUEST : True,
            'tid' : self._tid,
            'req' : DhtNetworkSubProcess.MESSAGE_STATS
        }), self._tid, dcb)

    def sendClusterPutRequest(self, _hash, value, done_cb=None):
        """
        Sends a put operation request.

        @param _hash: the hash of the value.
        @type _hash: bytes.
        @param value: the value.
        @type value: bytes.
        @param done_cb: A function taking as parameter a boolean "success".
        @type done_cb: function
        """
        self._tid += 1
        def dcb(packet):
            nonlocal done_cb
            if not done_cb:
                return
            try:
                done_cb(packet[b'success'])
            except KeyError:
                done_cb(False)
        self._sendRequest(msgpack.packb({
            DhtNetworkSubProcess.REQUEST : True,
            'tid' : self._tid,
            'req' : DhtNetworkSubProcess.NODE_PUT_REQ,
            'hash' : _hash,
            'value' : value
        }), self._tid, dcb)

    def sendClusterRequest(self, request, ids=[], done_cb=None):
        """
        Send request to a list of nodes or the whole cluster.

        @param request: The request. Possible values are:
                        DhtNetworkSubProcess.REMOVE_NODE_REQ
                        DhtNetworkSubProcess.SHUTDOWN_NODE_REQ
                        DhtNetworkSubProcess.SHUTDOWN_REPLACE_NODE_REQ
                        DhtNetworkSubProcess.SHUTDOWN_CLUSTER_REQ
                        DhtNetworkSubProcess.DUMP_STORAGE_REQ
        @type request: bytes
        @param ids: The list of ids concerned by the request.
        @type ids: list
        @param done_cb: A function taking as parameter a boolean "success".
        @type done_cb: function
        """
        self._tid += 1
        def dcb(packet):
            nonlocal done_cb
            if not done_cb:
                return
            try:
                done_cb(packet[b'success'])
            except KeyError:
                done_cb(False)
        self._sendRequest(msgpack.packb({
            DhtNetworkSubProcess.REQUEST : True,
            'tid' : self._tid,
            'req' : request,
            'ids' : ids
        }), self._tid, dcb)
+
+
class DhtNetwork(object):
    # Class-level default kept for backward compatibility; each instance now
    # gets its own list in __init__ so separate networks don't share nodes
    # (previously this mutable class attribute was shared by all instances).
    nodes = []

    class Log(object):
        # ANSI escape codes used for colored stderr logging.
        BOLD = "\033[1m"
        NORMAL = "\033[0m"
        WHITE = "\033[97m"
        RED = "\033[31m"
        YELLOW = "\033[33m"

        @staticmethod
        def _log_with_color(*to_print, color=None):
            color = color if color else DhtNetwork.Log.WHITE
            print('%s%s[DhtNetwork-%s]%s%s' %
                    (DhtNetwork.Log.BOLD, color, DhtNetwork.iface, DhtNetwork.Log.NORMAL, color),
                    *to_print, DhtNetwork.Log.NORMAL, file=sys.stderr)

        @staticmethod
        def log(*to_print):
            DhtNetwork.Log._log_with_color(*to_print, color=DhtNetwork.Log.WHITE)

        @staticmethod
        def warn(*to_print):
            DhtNetwork.Log._log_with_color(*to_print, color=DhtNetwork.Log.YELLOW)

        @staticmethod
        def err(*to_print):
            DhtNetwork.Log._log_with_color(*to_print, color=DhtNetwork.Log.RED)

    @staticmethod
    def run_node(ip4, ip6, p, bootstrap=[], is_bootstrap=False):
        """Start a DhtRunner bound to the given addresses/port and bootstrap it.

        @return: A tuple ((ip4, ip6, port), runner, node_id).
        """
        DhtNetwork.Log.log("run_node", ip4, ip6, p, bootstrap)
        n = DhtRunner()
        n.run(ipv4=ip4 if ip4 else "", ipv6=ip6 if ip6 else "", port=p, is_bootstrap=is_bootstrap)
        for b in bootstrap:
            n.bootstrap(b[0], b[1])
        time.sleep(.01)
        # Bug fix: the third tuple field used to be the 'id' builtin function;
        # store the actual node id so lookups by id (getNodeInfoById) work.
        return ((ip4, ip6, p), n, n.getNodeId())

    @staticmethod
    def find_ip(iface):
        """Return the (ipv4, ipv6) addresses of ``iface``; ('0.0.0.0', '') for 'any'."""
        if not iface or iface == 'any':
            return ('0.0.0.0','')

        addrs = netifaces.ifaddresses(iface)
        if_ip4 = addrs[netifaces.AF_INET][0]['addr']
        # Robustness fix: an interface without an IPv6 address used to raise
        # KeyError here; fall back to an empty string instead.
        try:
            if_ip6 = addrs[netifaces.AF_INET6][0]['addr']
        except (KeyError, IndexError):
            if_ip6 = ''
        return (if_ip4, if_ip6)

    def __init__(self, iface=None, ip4=None, ip6=None, port=4000, bootstrap=[], first_bootstrap=False):
        DhtNetwork.iface = iface
        self.port = port
        # Per-instance node list (fixes shared mutable class attribute).
        self.nodes = []
        ips = DhtNetwork.find_ip(iface)
        self.ip4 = ip4 if ip4 else ips[0]
        self.ip6 = ip6 if ip6 else ips[1]
        self.bootstrap = bootstrap
        if first_bootstrap:
            DhtNetwork.Log.log("Starting bootstrap node")
            self.nodes.append(DhtNetwork.run_node(self.ip4, self.ip6, self.port, self.bootstrap, is_bootstrap=True))
            self.bootstrap = [(self.ip4, str(self.port))]
            self.port += 1
        #print(self.ip4, self.ip6, self.port)

    def front(self):
        """Return the first node's runner, or None if the network is empty."""
        if len(self.nodes) == 0:
            return None
        return self.nodes[0][1]

    def get(self, i=None):
        """Return node ``i``'s runner, or a random one when ``i`` is None."""
        if not self.nodes:
            return None

        if i is None:
            l = list(self.nodes)
            random.shuffle(l)
            return l[0][1]
        else:
            return self.nodes[i][1]

    def getNodeInfoById(self, id=None):
        """Return the node tuple whose runner has the given node id, else None."""
        if id:
            for n in self.nodes:
                if n[1].getNodeId() == id:
                    return n
        return None

    def launch_node(self):
        """Start one more node on the next port and register it."""
        n = DhtNetwork.run_node(self.ip4, self.ip6, self.port, self.bootstrap)
        self.nodes.append(n)
        if not self.bootstrap:
            DhtNetwork.Log.log("Using fallback bootstrap", self.ip4, self.port)
            self.bootstrap = [(self.ip4, str(self.port))]
        self.port += 1
        return n

    def end_node(self, id=None, shutdown=False, last_msg_stats=None):
        """
        Ends a running node.

        @param id: The 40 hex chars id of the node.
        @type id: bytes
        @param shutdown: Perform a graceful shutdown before joining.
        @param last_msg_stats: Optional list collecting message stats.

        @return: If a node was deleted or not.
        @rtype : boolean
        """
        lock = threading.Condition()
        def shutdown_cb():
            nonlocal lock
            DhtNetwork.Log.log('Done.')
            with lock:
                lock.notify()

        if not self.nodes:
            return
        elif id:
            n = self.getNodeInfoById(id)
            if n:
                if shutdown:
                    with lock:
                        DhtNetwork.Log.log('Waiting for node to shutdown... ')
                        n[1].shutdown(shutdown_cb)
                        lock.wait()
                    if last_msg_stats:
                        last_msg_stats.append(self.getMessageStats())
                n[1].join()
                self.nodes.remove(n)
                DhtNetwork.Log.log(id, 'deleted !')
                return True
            else:
                return False
        else:
            # No id given: drop the last node without graceful shutdown.
            n = self.nodes.pop()
            n[1].join()
            return True

    def replace_node(self, id=None, shutdown=False, last_msg_stats=None):
        """End a node (random order) and launch a fresh one if one was removed."""
        random.shuffle(self.nodes)
        deleted = self.end_node(id=id, shutdown=shutdown, last_msg_stats=last_msg_stats)
        if deleted:
            self.launch_node()

    def resize(self, n):
        """Grow or shrink the network to ``n`` nodes (capped at 500)."""
        n = min(n, 500)
        l = len(self.nodes)
        if n == l:
            return
        if n > l:
            DhtNetwork.Log.log("Launching", n-l, "nodes", self.ip4, self.ip6)
            for i in range(l, n):
                self.launch_node()
        else:
            DhtNetwork.Log.log("Ending", l-n, "nodes", self.ip4, self.ip6)
            #random.shuffle(self.nodes)
            for i in range(n, l):
                self.end_node()

    def getMessageStats(self):
        """Sum per-node message stats; returns [num_nodes, ping, find, get, put, listen]."""
        stats = np.array([0,0,0,0,0])
        for n in self.nodes:
            stats += np.array(n[1].getNodeMessageStats())
        stats_list = [len(self.nodes)]
        stats_list.extend(stats.tolist())
        return stats_list
+
+
if __name__ == '__main__':
    # Sub-process entry point: runs a cluster of DHT nodes inside a network
    # namespace and serves msgpack requests from the parent benchmark process.
    import argparse

    lock = threading.Condition()
    quit = False  # set by signal handlers / cluster shutdown to stop the loop

    def send_msgpack_packet(packet):
        # Answers are written raw to stdout; the parent unpacks the stream.
        sys.stdout.buffer.write(packet)
        sys.stdout.buffer.flush()

    def notify_benchmark(packet, success):
        """Notifies the benchmark when an operation has been completed.

        @param success: If the operation has been successful
        @type success: boolean
        @param packet: The packet we are providing an answer for.
        @type packet: dict
        """
        send_msgpack_packet(msgpack.packb({
            DhtNetworkSubProcess.ANSWER : True,
            'tid' : packet[b'tid'],
            'success' : success
        }))

    def send_stats(packet, stats):
        # Answer carrying message statistics for a MESSAGE_STATS request.
        send_msgpack_packet(msgpack.packb({
            DhtNetworkSubProcess.ANSWER : True,
            'tid' : packet[b'tid'],
            'stats' : stats
        }))

    def listen_to_mother_nature(q):
        # Reader thread: forwards request packets from the parent process
        # to the main loop through queue ``q``.
        global quit
        while not quit:
            for p in msgpack.Unpacker(sys.stdin.buffer.raw):
                if isinstance(p, dict) and DhtNetworkSubProcess.REQUEST.encode() in p:
                    with lock:
                        q.put(p)
                        lock.notify()

    def handler(signum, frame):
        # Any termination signal stops the main loop.
        global quit
        with lock:
            quit = True
            lock.notify()

    signal.signal(signal.SIGALRM, handler)
    signal.signal(signal.SIGABRT, handler)
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)

    net = None
    try:
        parser = argparse.ArgumentParser(description='Create a dht network of -n nodes')
        parser.add_argument('-n', '--node-num', help='number of dht nodes to run', type=int, default=32)
        parser.add_argument('-I', '--iface', help='local interface to bind', default='any')
        parser.add_argument('-p', '--port', help='start of port range (port, port+node_num)', type=int, default=4000)
        parser.add_argument('-b', '--bootstrap', help='bootstrap address')
        parser.add_argument('-b6', '--bootstrap6', help='bootstrap address (IPv6)')
        parser.add_argument('-bp', '--bootstrap-port', help='bootstrap port', default="4000")
        args = parser.parse_args()

        bs = []
        if args.bootstrap:
            bs.append((args.bootstrap, args.bootstrap_port))
        if args.bootstrap6:
            bs.append((args.bootstrap6, args.bootstrap_port))

        net = DhtNetwork(iface=args.iface, port=args.port, bootstrap=bs)
        net.resize(args.node_num)

        q = queue.Queue()
        t = threading.Thread(target=listen_to_mother_nature, args=tuple([q]))
        t.daemon = True
        t.start()

        msg_stats = []  # stats of nodes shut down so far (per MESSAGE_STATS)

        with lock:
            while not quit:
                try:
                    packet = q.get_nowait()
                except queue.Empty:
                    lock.wait()
                else:
                    # Local aliases for the request codes, for readability.
                    NODE_PUT_REQ = DhtNetworkSubProcess.NODE_PUT_REQ
                    NEW_NODE_REQ = DhtNetworkSubProcess.NEW_NODE_REQ
                    REMOVE_NODE_REQ = DhtNetworkSubProcess.REMOVE_NODE_REQ
                    SHUTDOWN_NODE_REQ = DhtNetworkSubProcess.SHUTDOWN_NODE_REQ
                    SHUTDOWN_REPLACE_NODE_REQ = DhtNetworkSubProcess.SHUTDOWN_REPLACE_NODE_REQ
                    SHUTDOWN_CLUSTER_REQ = DhtNetworkSubProcess.SHUTDOWN_CLUSTER_REQ
                    DUMP_STORAGE_REQ = DhtNetworkSubProcess.DUMP_STORAGE_REQ
                    MESSAGE_STATS = DhtNetworkSubProcess.MESSAGE_STATS

                    req = packet[b'req'].decode()
                    success = True
                    if req in [SHUTDOWN_NODE_REQ, SHUTDOWN_REPLACE_NODE_REQ, REMOVE_NODE_REQ]:
                        def delete_request(req, nid):
                            global msg_stats
                            if not nid:
                                return
                            if req == SHUTDOWN_NODE_REQ:
                                net.end_node(id=nid, shutdown=True, last_msg_stats=msg_stats)
                            elif req == SHUTDOWN_REPLACE_NODE_REQ:
                                net.replace_node(id=nid, shutdown=True, last_msg_stats=msg_stats)
                            elif req == REMOVE_NODE_REQ:
                                net.end_node(id=nid, last_msg_stats=msg_stats)

                        nodes = packet[b'ids']
                        if nodes:
                            for nid in nodes:
                                delete_request(req, nid)
                        else:
                            # No explicit ids: act on a random node.
                            n = net.get()
                            if n:
                                delete_request(req, n.getNodeId())
                            else:
                                success = False
                    elif req == SHUTDOWN_CLUSTER_REQ:
                        # NOTE(review): iterates over net.nodes while end_node
                        # mutates it, and n[2] comes from run_node's third
                        # tuple field — verify it holds the node id end_node
                        # expects.
                        for n in net.nodes:
                            net.end_node(id=n[2], shutdown=True, last_msg_stats=msg_stats)
                        quit = True
                    elif req == NEW_NODE_REQ:
                        net.launch_node()
                    elif req == NODE_PUT_REQ:
                        _hash = packet[b'hash']
                        v = packet[b'value']
                        n = net.get()
                        if n:
                            n.put(InfoHash(_hash), Value(v))
                        else:
                            success = False
                    elif req == DUMP_STORAGE_REQ:
                        hashes = packet[b'ids']
                        # NOTE(review): DhtNetwork has no 'log' method (Log is
                        # a nested class) — this likely should call
                        # DhtNetwork.Log.log; confirm before fixing.
                        for n in [m[1] for m in net.nodes if m[1].getNodeId() in hashes]:
                            net.log(n.getStorageLog())
                    elif req == MESSAGE_STATS:
                        # Combine live stats with those of already-ended nodes.
                        stats = sum([np.array(x) for x in [net.getMessageStats()]+msg_stats])
                        send_stats(packet, [int(_) for _ in stats])
                        msg_stats.clear()
                        continue
                    notify_benchmark(packet, success)
    except Exception as e:
        traceback.print_tb(e.__traceback__)
        print(type(e).__name__+':', e, file=sys.stderr)
    finally:
        # Always shut the whole cluster down on exit.
        if net:
            net.resize(0)
--- /dev/null
+# -*- coding: utf-8 -*-
+# Copyright (C) 2015 Savoir-Faire Linux Inc.
+# Author(s): Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+# Simon Désaulniers <sim.desaulniers@gmail.com>
+
+import sys
+import os
+import threading
+import random
+import string
+import time
+import subprocess
+import re
+import traceback
+import collections
+
+from matplotlib.ticker import FuncFormatter
+import math
+
+import numpy as np
+import matplotlib.pyplot as plt
+import networkx as nx
+from networkx.drawing.nx_agraph import graphviz_layout
+
+
+from opendht import *
+from dht.network import DhtNetwork, DhtNetworkSubProcess
+
+############
+# Common #
+############
+
# matplotlib display format for bits (b, Kb, Mb)
bit_format = None  # raw bits: no formatter applied
Kbit_format = FuncFormatter(lambda x, pos: '%1.1f' % (x*1024**-1) + 'Kb')
Mbit_format = FuncFormatter(lambda x, pos: '%1.1f' % (x*1024**-2) + 'Mb')
+
def random_str_val(size=1024):
    """Creates a random string value of specified size.

    @param size: Size, in bytes, of the value.
    @type size: int

    @return: Random string value
    @rtype : str
    """
    chars = [random.choice(string.hexdigits) for _ in range(size)]
    return ''.join(chars)
+
+
def random_hash():
    """Creates random InfoHash.

    Builds the hash from 40 random hexadecimal characters (160 bits).
    """
    return InfoHash(random_str_val(size=40).encode())
+
def timer(f, *args):
    """Measure the wall-clock time taken by a call to ``f``.

    @param f: Function to time.
    @type f: function

    @param args: Positional arguments forwarded to ``f``.
    @type args: list

    @rtype: float
    @return: Seconds elapsed while executing ``f(*args)``.
    """
    begin = time.time()
    f(*args)
    elapsed = time.time() - begin
    return elapsed
+
def reset_before_test(featureTestMethod):
    """
    This is a decorator for all test methods needing reset().

    If the decorated method is bound to a FeatureTest instance, its _reset()
    hook is invoked before the test body runs.

    @param featureTestMethod: The method to be decorated. All decorated methods
                              must have 'self' object as first arg.
    @type featureTestMethod: function
    """
    import functools

    # Idiom fix: preserve the wrapped test's __name__/__doc__ so reports and
    # debuggers show the real test instead of 'call'.
    @functools.wraps(featureTestMethod)
    def call(*args, **kwargs):
        self = args[0]
        if isinstance(self, FeatureTest):
            self._reset()
        return featureTestMethod(*args, **kwargs)
    return call
+
def display_plot(yvals, xvals=None, yformatter=None, display_time=3, **kwargs):
    """
    Displays a plot of data in interactive mode. This method is made to be
    called successively for plot refreshing.

    @param yvals: Ordinate values (float).
    @type yvals: list
    @param xvals: Abscissa values (float).
    @type xvals: list
    @param yformatter: The matplotlib FuncFormatter to use for y values.
    @type yformatter: matplotlib.ticker.FuncFormatter
    @param display_time: The time matplotlib can take to refresh the plot.
    @type display_time: int
    """
    plt.ion()
    plt.clf()
    plt.show()
    if yformatter:
        # Bug fix: honor the caller-provided formatter; Kbit_format was
        # hardcoded here, silently ignoring the yformatter argument.
        plt.axes().yaxis.set_major_formatter(yformatter)
    if xvals:
        plt.plot(xvals, yvals, **kwargs)
    else:
        plt.plot(yvals, **kwargs)
    plt.pause(display_time)
+
def display_traffic_plot(ifname):
    """Continuously plots the traffic rate of a given interface.

    Warning: this function never returns (iftop_traffic_data is an endless
    generator), so the plot is refreshed forever.

    @param ifname: Interface name.
    @type ifname: string
    """
    interval = 2
    rates = []
    timestamps = []
    for rate in iftop_traffic_data(ifname, interval=interval):
        rates.append(rate)
        previous = timestamps[-1] if timestamps else 0
        timestamps.append(previous + interval)
        display_plot(rates, xvals=timestamps, yformatter=Kbit_format, color='blue')
+
def iftop_traffic_data(ifname, interval=2, rate_type='send_receive'):
    """
    Generator (yields data) function collecting traffic data from iftop
    subprocess.

    @param ifname: Interface to listen to.
    @type ifname: string
    @param interval: Interval of time between to data collections. Possible
                     values are 2, 10 or 40 (matching iftop's three rate
                     columns).
    @type interval: int
    @param rate_type: (default: send_receive) Whether to pick "send",
                      "receive" or "send and receive" rates. Possible
                      values : "send", "receive" and "send_receive".
    @type rate_type: string

    @return: Yields the selected rate converted to bits per second
             (iftop prints b/Kb/Mb; Kb and Mb values are scaled by
             1024 and 1024**2 respectively).
    """
    # iftop stdout string format: the markers preceding each rate column.
    SEND_RATE_STR = "Total send rate"
    RECEIVE_RATE_STR = "Total receive rate"
    SEND_RECEIVE_RATE_STR = "Total send and receive rate"
    RATE_STR = {
        "send" : SEND_RATE_STR,
        "receive" : RECEIVE_RATE_STR,
        "send_receive" : SEND_RECEIVE_RATE_STR
    }
    # Column index of each averaging window in iftop's text output.
    TWO_SECONDS_RATE_COL = 0
    TEN_SECONDS_RATE_COL = 1
    FOURTY_SECONDS_RATE_COL = 2
    COLS = {
        2 : TWO_SECONDS_RATE_COL,
        10 : TEN_SECONDS_RATE_COL,
        40 : FOURTY_SECONDS_RATE_COL
    }
    # Regexes matching a numeric rate and its unit suffix (b, Kb or Mb).
    FLOAT_REGEX = "[0-9]+[.]*[0-9]*"
    BIT_REGEX = "[KM]*b"

    # -t: text mode (no ncurses), one report block per interval.
    iftop = subprocess.Popen(["iftop", "-i", ifname, "-t"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    while True:
        # NOTE(review): if iftop exits, readline() returns b'' forever and
        # this loop busy-spins without yielding — confirm intended.
        line = iftop.stdout.readline().decode()
        if RATE_STR[rate_type] in line:
            # Pick the rate/unit pair of the requested averaging window.
            rate, unit = re.findall("("+FLOAT_REGEX+")("+BIT_REGEX+")", line)[COLS[interval]]
            rate = float(rate)
            if unit == "Kb":
                rate *= 1024
            elif unit == "Mb":
                rate *= 1024**2
            yield rate
+
+###########
+# Tests #
+###########
+
class FeatureTest(object):
    """
    Abstract base class of every feature test.

    Concrete tests subclass this and implement run(). Two pieces of state are
    shared across all tests through class attributes:
    - done: number of asynchronous operations still pending;
    - lock: condition variable used to wait for asynchronous callbacks.
    """

    # Pending asynchronous operations counter.
    done = 0
    # Condition variable created by _reset(); tests wait on it.
    lock = None

    def __init__(self, test, workbench):
        """
        @param test: The test string indicating the test to run. This string is
                     determined in the child classes.
        @type test: string

        @param workbench: A WorkBench object to use inside this test.
        @type workbench: WorkBench
        """
        self._test = test
        self._workbench = workbench
        self._bootstrap = workbench.get_bootstrap()

    def _reset(self):
        """
        Resets the shared class-level state (counter and condition variable).

        This method is most likely going to be called before each tests.
        """
        FeatureTest.done = 0
        FeatureTest.lock = threading.Condition()

    def run(self):
        """Entry point of the test; subclasses must override this."""
        raise NotImplementedError('This method must be implemented.')
+
+##################################
+# PHT #
+##################################
+
class PhtTest(FeatureTest):
    """Tests for the Prefix Hash Tree (PHT) indexing structure.
    """

    # Entries found under the last looked-up leaf (filled by lookupCb).
    indexEntries = None
    # Prefix (str) of the last leaf reached by a lookup (filled by lookupCb).
    prefix = None
    # Key currently being inserted/looked up; read by the static callbacks.
    key = None

    def __init__(self, test, workbench, opts):
        """
        @param test: is one of the following:
                     - 'insert': indexes a considerable amount of data in
                       the PHT structure.
                     TODO
        @type test: string

        @param opts: Dictionnary containing options for the test. Allowed
                     options are:
                     - 'num_keys': this specifies the number of keys to insert
                       in the PHT during the test (default: 32).
                     - 'timer': if present, log the time taken by each
                       insert/lookup step.
        @type opts: dict
        """
        super(PhtTest, self).__init__(test, workbench)
        self._num_keys = opts['num_keys'] if 'num_keys' in opts else 32
        self._timer = True if 'timer' in opts else False

    def _reset(self):
        """Resets shared static state before each test method runs."""
        super(PhtTest, self)._reset()
        PhtTest.indexEntries = []

    @staticmethod
    def lookupCb(vals, prefix):
        """PHT lookup value callback: records found entries and leaf prefix."""
        PhtTest.indexEntries = list(vals)
        PhtTest.prefix = prefix.decode()
        DhtNetwork.log('Index name: <todo>')
        DhtNetwork.log('Leaf prefix:', prefix)
        for v in vals:
            DhtNetwork.log('[ENTRY]:', v)

    @staticmethod
    def lookupDoneCb(ok):
        """PHT lookup completion callback: wakes up the waiting test thread."""
        DhtNetwork.log('[LOOKUP]:', PhtTest.key, "--", "success!" if ok else "Fail...")
        with FeatureTest.lock:
            FeatureTest.lock.notify()

    @staticmethod
    def insertDoneCb(ok):
        """PHT insert completion callback: wakes up the waiting test thread."""
        DhtNetwork.log('[INSERT]:', PhtTest.key, "--", "success!" if ok else "Fail...")
        with FeatureTest.lock:
            FeatureTest.lock.notify()

    @staticmethod
    def drawTrie(trie_dict):
        """
        Draws the trie structure of the PHT from dictionnary.

        @param trie_dict: Dictionnary of index entries (prefix -> entry).
        @type trie_dict: dict
        """
        prefixes = list(trie_dict.keys())
        if len(prefixes) == 0:
            return

        # Build the parent->child edge list of the binary trie: for every
        # prefix, connect each ancestor to both of its children ("0"/"1").
        edges = list([])
        for prefix in prefixes:
            for i in range(-1, len(prefix)-1):
                u = prefix[:i+1]
                x = ("." if i == -1 else u, u+"0")
                y = ("." if i == -1 else u, u+"1")
                if x not in edges:
                    edges.append(x)
                if y not in edges:
                    edges.append(y)

        # TODO: use a binary tree position layout...
        # UPDATE : In a better way [change lib]
        G = nx.Graph(sorted(edges, key=lambda x: len(x[0])))
        plt.title("PHT: Tree")
        pos=graphviz_layout(G,prog='dot')
        nx.draw(G, pos, with_labels=True, node_color='white')
        plt.show()

    def run(self):
        """Dispatches to the requested test; always shrinks the bootstrap
        cluster back to a single node afterwards."""
        try:
            if self._test == 'insert':
                self._insertTest()
        except Exception as e:
            print(e)
        finally:
            self._bootstrap.resize(1)

    ###########
    #  Tests  #
    ###########

    @reset_before_test
    def _insertTest(self):
        """Indexes self._num_keys random keys in the PHT, then looks them all
        up and draws the resulting trie.
        """
        bootstrap = self._bootstrap
        bootstrap.resize(2)
        dht = bootstrap.get(1)

        # BUG FIX: math.log() returns a float, so the log-based term must be
        # truncated before use — range(NUM_DIG) below requires an int, and
        # max(<float> , 5) could previously yield a float (TypeError) for
        # large num_keys.
        NUM_DIG = max(int(math.log(self._num_keys, 2)/4), 5) # at least 5 digit keys.
        keyspec = collections.OrderedDict([('foo', NUM_DIG)])
        pht = Pht(b'foo_index', keyspec, dht)

        DhtNetwork.log('PHT has',
                       pht.MAX_NODE_ENTRY_COUNT,
                       'node'+ ('s' if pht.MAX_NODE_ENTRY_COUNT > 1 else ''),
                       'per leaf bucket.')
        # One random NUM_DIG-digit hex key per entry, under the single
        # keyspec field.
        keys = [{
            [_ for _ in keyspec.keys()][0] :
            ''.join(random.SystemRandom().choice(string.hexdigits)
                for _ in range(NUM_DIG)).encode()
            } for n in range(self._num_keys)]
        all_entries = {}

        # Index all entries.
        for key in keys:
            PhtTest.key = key
            with FeatureTest.lock:
                time_taken = timer(pht.insert, key, IndexValue(random_hash()), PhtTest.insertDoneCb)
                if self._timer:
                    DhtNetwork.log('This insert step took : ', time_taken, 'second')
                FeatureTest.lock.wait()

        time.sleep(1)

        # Recover entries now that the trie is complete.
        for key in keys:
            PhtTest.key = key
            with FeatureTest.lock:
                time_taken = timer(pht.lookup, key, PhtTest.lookupCb, PhtTest.lookupDoneCb)
                if self._timer:
                    DhtNetwork.log('This lookup step took : ', time_taken, 'second')
                FeatureTest.lock.wait()

            all_entries[PhtTest.prefix] = [e.__str__()
                                           for e in PhtTest.indexEntries]

        for p in all_entries.keys():
            DhtNetwork.log('All entries under prefix', p, ':')
            DhtNetwork.log(all_entries[p])
        PhtTest.drawTrie(all_entries)
+
+##################################
+# DHT #
+##################################
+
class DhtFeatureTest(FeatureTest):
    """
    This is a base dht test: provides synchronous put/get helpers built on
    top of the asynchronous OpenDHT API, plus static callbacks recording
    which values and nodes were reached.
    """
    #static variables used by class callbacks
    # True when as many values were fetched as were put.
    successfullTransfer = lambda lv,fv: len(lv) == len(fv)
    # Ids (str) of the non-expired nodes reached by the last get.
    foreignNodes = None
    # Values fetched by the last get.
    foreignValues = None

    def __init__(self, test, workbench):
        super(DhtFeatureTest, self).__init__(test, workbench)

    def _reset(self):
        """Clears the shared callback state before each test."""
        super(DhtFeatureTest, self)._reset()
        DhtFeatureTest.foreignNodes = []
        DhtFeatureTest.foreignValues = []

    @staticmethod
    def getcb(value):
        """'get' value callback: logs and records the fetched value."""
        # BUG FIX: the ellipsis must be decided on the untruncated string;
        # the previous code truncated first, so len(vstr) > 100 was never
        # true and "..." was never appended.
        vstr = value.__str__()
        DhtNetwork.Log.log('[GET]: %s' % vstr[:100] + ("..." if len(vstr) > 100 else ""))
        DhtFeatureTest.foreignValues.append(value)
        return True

    @staticmethod
    def putDoneCb(ok, nodes):
        """'put' completion callback: decrements pending ops, wakes waiter."""
        with FeatureTest.lock:
            if not ok:
                DhtNetwork.Log.log("[PUT]: failed!")
            FeatureTest.done -= 1
            FeatureTest.lock.notify()

    @staticmethod
    def getDoneCb(ok, nodes):
        """'get' completion callback: records reached nodes, wakes waiter."""
        with FeatureTest.lock:
            if not ok:
                DhtNetwork.Log.log("[GET]: failed!")
            else:
                for node in nodes:
                    if not node.getNode().isExpired():
                        DhtFeatureTest.foreignNodes.append(node.getId().toString())
            FeatureTest.done -= 1
            FeatureTest.lock.notify()

    def _dhtPut(self, producer, _hash, *values):
        """Synchronously puts values under _hash through the producer node."""
        with FeatureTest.lock:
            for val in values:
                # Same ellipsis fix as in getcb: test the untruncated length.
                vstr = val.__str__()
                DhtNetwork.Log.log('[PUT]:', _hash.toString(), '->', vstr[:100] + ("..." if len(vstr) > 100 else ""))
                FeatureTest.done += 1
                producer.put(_hash, val, DhtFeatureTest.putDoneCb)
            while FeatureTest.done > 0:
                FeatureTest.lock.wait()

    def _dhtGet(self, consumer, _hash):
        """Synchronously fetches all values under _hash through the consumer
        node; results land in foreignValues/foreignNodes."""
        DhtFeatureTest.foreignValues = []
        DhtFeatureTest.foreignNodes = []
        with FeatureTest.lock:
            FeatureTest.done += 1
            DhtNetwork.Log.log('[GET]:', _hash.toString())
            consumer.get(_hash, DhtFeatureTest.getcb, DhtFeatureTest.getDoneCb)
            while FeatureTest.done > 0:
                FeatureTest.lock.wait()

    def _gottaGetThemAllPokeNodes(self, consumer, hashes, nodes=None):
        """Gets every hash in hashes, accumulating hosting node ids into the
        optional nodes set."""
        for h in hashes:
            self._dhtGet(consumer, h)
            if nodes is not None:
                for n in DhtFeatureTest.foreignNodes:
                    nodes.add(n)
+
class PersistenceTest(DhtFeatureTest):
    """
    This tests persistence of data on the network.
    """

    def __init__(self, test, workbench, opts):
        """
        @param test: is one of the following:
                     - 'mult_time': test persistence of data based on internal
                       OpenDHT storage maintenance timings.
                     - 'delete': test persistence of data upon deletion of
                       nodes.
                     - 'replace': replacing cluster successively.
        @type test: string


        OPTIONS

        - dump_str_log: Enables storage log at test ending.
        - keep_alive: Keeps the test running indefinately. This may be useful
                      to manually analyse the network traffic during a longer
                      period.
        - num_producers: Number of producers of data during a DHT test.
        - num_values: Number of values to initialize the DHT with.
        """

        # opts
        super(PersistenceTest, self).__init__(test, workbench)
        self._traffic_plot = True if 'traffic_plot' in opts else False
        self._dump_storage = True if 'dump_str_log' in opts else False
        self._op_plot = True if 'op_plot' in opts else False
        self._keep_alive = True if 'keep_alive' in opts else False
        self._num_producers = opts['num_producers'] if 'num_producers' in opts else None
        self._num_values = opts['num_values'] if 'num_values' in opts else None

    def _trigger_dp(self, trigger_nodes, _hash, count=1):
        """
        Triggers the data persistence over time. In order to this, `count` nodes
        are created with an id around the hash of a value.

        @param trigger_nodes: List of created nodes. The nodes created in this
                              function are append to this list.
        @type trigger_nodes: list
        @param _hash: Is the id of the value around which creating nodes.
        @type _hash: InfoHash
        @param count: The number of nodes to create with id around the id of
                      value.
        @type count: int
        """
        _hash_str = _hash.toString().decode()
        _hash_int = int(_hash_str, 16)
        # Spread node ids symmetrically around the value's id.
        for i in range(int(-count/2), int(count/2)+1):
            _hash_str = '{:40x}'.format(_hash_int + i)
            config = DhtConfig()
            config.setNodeId(InfoHash(_hash_str.encode()))
            n = DhtRunner()
            n.run(config=config)
            n.bootstrap(self._bootstrap.ip4,
                        str(self._bootstrap.port))
            DhtNetwork.log('Node','['+_hash_str+']',
                           'started around', _hash.toString().decode()
                           if n.isRunning() else
                           'failed to start...'
            )
            trigger_nodes.append(n)

    def _result(self, local_values, new_nodes):
        """Logs whether all local_values persisted and, when new_nodes hosted
        them, optionally dumps storage logs from the clusters."""
        bootstrap = self._bootstrap
        if not DhtFeatureTest.successfullTransfer(local_values, DhtFeatureTest.foreignValues):
            DhtNetwork.Log.log('[GET]: Only %s on %s values persisted.' %
                    (len(DhtFeatureTest.foreignValues), len(local_values)))
        else:
            DhtNetwork.Log.log('[GET]: All values successfully persisted.')
        if DhtFeatureTest.foreignValues:
            if new_nodes:
                DhtNetwork.Log.log('Values are newly found on:')
                for node in new_nodes:
                    DhtNetwork.Log.log(node)
                if self._dump_storage:
                    DhtNetwork.Log.log('Dumping all storage log from '\
                                  'hosting nodes.')
                    for proc in self._workbench.procs:
                        proc.sendClusterRequest(DhtNetworkSubProcess.DUMP_STORAGE_REQ, DhtFeatureTest.foreignNodes)
            else:
                DhtNetwork.Log.log("Values didn't reach new hosting nodes after shutdown.")

    def run(self):
        """Dispatches to the requested test, saves pending plots on exit and
        shrinks the bootstrap cluster back to one node."""
        try:
            if self._test == 'normal':
                self._totallyNormalTest()
            elif self._test == 'delete':
                self._deleteTest()
            elif self._test == 'replace':
                self._replaceClusterTest()
            elif self._test == 'mult_time':
                self._multTimeTest()
            else:
                raise NameError("This test is not defined '" + self._test + "'")
        except Exception as e:
            traceback.print_tb(e.__traceback__)
            print(type(e).__name__+':', e, file=sys.stderr)
        finally:
            if self._traffic_plot or self._op_plot:
                plot_fname = "traffic-plot"
                print('plot saved to', plot_fname)
                plt.savefig(plot_fname)
            self._bootstrap.resize(1)

    ###########
    #  Tests  #
    ###########

    @reset_before_test
    def _totallyNormalTest(self):
        """
        Reproduces a network in a realistic state: random puts, node arrivals
        and node shutdowns keep happening in daemon threads while traffic is
        optionally plotted.
        """
        trigger_nodes = []
        wb = self._workbench
        bootstrap = self._bootstrap
        # Value representing an ICE packet. Each ICE packet is around 1KB.
        VALUE_SIZE = 1024
        # NOTE(review): computed but currently unused — confirm whether the
        # put load should depend on it.
        num_values_per_hash = self._num_values/wb.node_num if self._num_values else 5

        # nodes and values counters
        total_nr_values = 0
        nr_nodes = wb.node_num
        op_cv = threading.Condition()

        # values string in string format. Used for sending cluster request.
        hashes = [random_hash() for _ in range(wb.node_num)]

        def normalBehavior(do, t):
            # Repeats `do` forever, sleeping a random delay (up to t seconds)
            # between iterations; serialized through op_cv.
            nonlocal total_nr_values, op_cv
            while True:
                with op_cv:
                    do()
                time.sleep(random.uniform(0.0, float(t)))

        def putRequest():
            # Asks a random cluster to put a random value on a random hash.
            nonlocal hashes, VALUE_SIZE, total_nr_values
            lock = threading.Condition()
            def dcb(success):
                nonlocal total_nr_values, lock
                if success:
                    total_nr_values += 1
                    DhtNetwork.Log.log("INFO: "+ str(total_nr_values)+" values put on the dht since begining")
                with lock:
                    lock.notify()
            with lock:
                DhtNetwork.Log.warn("Random value put on the DHT...")
                random.choice(wb.procs).sendClusterPutRequest(random.choice(hashes).toString(),
                                                              random_str_val(size=VALUE_SIZE).encode(),
                                                              done_cb=dcb)
                lock.wait()

        puts = threading.Thread(target=normalBehavior, args=(putRequest, 30.0/wb.node_num))
        puts.daemon = True
        puts.start()

        def newNodeRequest():
            # Asks a random cluster to spawn one new node.
            nonlocal nr_nodes
            lock = threading.Condition()
            def dcb(success):
                nonlocal nr_nodes, lock
                nr_nodes += 1
                DhtNetwork.Log.log("INFO: now "+str(nr_nodes)+" nodes on the dht")
                with lock:
                    lock.notify()
            with lock:
                DhtNetwork.Log.warn("Node joining...")
                random.choice(wb.procs).sendClusterRequest(DhtNetworkSubProcess.NEW_NODE_REQ, done_cb=dcb)
                lock.wait()

        connections = threading.Thread(target=normalBehavior, args=(newNodeRequest, 1*50.0/wb.node_num))
        connections.daemon = True
        connections.start()

        def shutdownNodeRequest():
            # Asks a random cluster to shut one node down.
            nonlocal nr_nodes
            lock = threading.Condition()
            def dcb(success):
                nonlocal nr_nodes, lock
                if success:
                    nr_nodes -= 1
                    DhtNetwork.Log.log("INFO: now "+str(nr_nodes)+" nodes on the dht")
                else:
                    DhtNetwork.Log.err("Oops.. No node to shutodwn.")

                with lock:
                    lock.notify()
            with lock:
                DhtNetwork.Log.warn("Node shutting down...")
                random.choice(wb.procs).sendClusterRequest(DhtNetworkSubProcess.SHUTDOWN_NODE_REQ, done_cb=dcb)
                lock.wait()

        shutdowns = threading.Thread(target=normalBehavior, args=(shutdownNodeRequest, 1*60.0/wb.node_num))
        shutdowns.daemon = True
        shutdowns.start()

        if self._traffic_plot:
            display_traffic_plot('br'+wb.ifname)
        else:
            # blocks in matplotlib thread
            while True:
                plt.pause(3600)


    @reset_before_test
    def _deleteTest(self):
        """
        It uses Dht shutdown call from the API to gracefuly finish the nodes one
        after the other.
        """
        bootstrap = self._bootstrap

        ops_count = []

        bootstrap.resize(3)
        consumer = bootstrap.get(1)
        producer = bootstrap.get(2)

        myhash = random_hash()
        local_values = [Value(b'foo'), Value(b'bar'), Value(b'foobar')]

        self._dhtPut(producer, myhash, *local_values)

        #checking if values were transfered
        self._dhtGet(consumer, myhash)
        if not DhtFeatureTest.successfullTransfer(local_values, DhtFeatureTest.foreignValues):
            if DhtFeatureTest.foreignValues:
                DhtNetwork.Log.log('[GET]: Only ', len(DhtFeatureTest.foreignValues) ,' on ',
                        len(local_values), ' values successfully put.')
            else:
                DhtNetwork.Log.log('[GET]: 0 values successfully put')


        if DhtFeatureTest.foreignValues and DhtFeatureTest.foreignNodes:
            DhtNetwork.Log.log('Values are found on :')
            for node in DhtFeatureTest.foreignNodes:
                DhtNetwork.Log.log(node)

            for _ in range(max(1, int(self._workbench.node_num/32))):
                DhtNetwork.Log.log('Removing all nodes hosting target values...')
                cluster_ops_count = 0
                for proc in self._workbench.procs:
                    DhtNetwork.Log.log('[REMOVE]: sending shutdown request to', proc)
                    lock = threading.Condition()
                    def dcb(success):
                        nonlocal lock
                        if not success:
                            DhtNetwork.Log.err("Failed to shutdown.")
                        with lock:
                            lock.notify()

                    with lock:
                        proc.sendClusterRequest(
                                DhtNetworkSubProcess.SHUTDOWN_NODE_REQ,
                                DhtFeatureTest.foreignNodes,
                                done_cb=dcb
                        )
                        lock.wait()
                    DhtNetwork.Log.log('sending message stats request')
                    def msg_dcb(stats):
                        nonlocal cluster_ops_count, lock
                        if stats:
                            cluster_ops_count += sum(stats[1:])
                        with lock:
                            lock.notify()
                    with lock:
                        proc.sendGetMessageStats(done_cb=msg_dcb)
                        lock.wait()
                    DhtNetwork.Log.log("5 seconds wait...")
                    time.sleep(5)
                ops_count.append(cluster_ops_count/self._workbench.node_num)

            # checking if values were transfered to new nodes
            foreignNodes_before_delete = DhtFeatureTest.foreignNodes
            DhtNetwork.Log.log('[GET]: trying to fetch persistent values')
            self._dhtGet(consumer, myhash)
            new_nodes = set(DhtFeatureTest.foreignNodes) - set(foreignNodes_before_delete)

            self._result(local_values, new_nodes)

            if self._op_plot:
                display_plot(ops_count, color='blue')
        else:
            DhtNetwork.Log.log("[GET]: either couldn't fetch values or nodes hosting values...")

        # BUG FIX: a trailing "if traffic_plot_thread: ... join()" block was
        # removed here — traffic_plot_thread was never defined anywhere,
        # so reaching the end of this test always raised a NameError.

    @reset_before_test
    def _replaceClusterTest(self):
        """
        It replaces all clusters one after the other.
        """
        clusters = 8

        bootstrap = self._bootstrap

        bootstrap.resize(3)
        consumer = bootstrap.get(1)
        producer = bootstrap.get(2)

        myhash = random_hash()
        local_values = [Value(b'foo'), Value(b'bar'), Value(b'foobar')]

        self._dhtPut(producer, myhash, *local_values)
        self._dhtGet(consumer, myhash)
        initial_nodes = DhtFeatureTest.foreignNodes

        DhtNetwork.Log.log('Replacing', clusters, 'random clusters successively...')
        for n in range(clusters):
            i = random.randint(0, len(self._workbench.procs)-1)
            proc = self._workbench.procs[i]
            DhtNetwork.Log.log('Replacing', proc)
            proc.sendClusterRequest(DhtNetworkSubProcess.SHUTDOWN_CLUSTER_REQ)
            self._workbench.stop_cluster(i)
            self._workbench.start_cluster(i)

        DhtNetwork.Log.log('[GET]: trying to fetch persistent values')
        self._dhtGet(consumer, myhash)
        new_nodes = set(DhtFeatureTest.foreignNodes) - set(initial_nodes)

        self._result(local_values, new_nodes)

    @reset_before_test
    def _multTimeTest(self):
        """
        Multiple put() calls are made from multiple nodes to multiple hashes
        after what a set of 8 nodes is created around each hashes in order to
        enable storage maintenance each nodes. Therefor, this tests will wait 10
        minutes for the nodes to trigger storage maintenance.
        """
        trigger_nodes = []
        bootstrap = self._bootstrap

        # BUG FIX: the fallback must depend on _num_producers itself, not on
        # _num_values — previously N_PRODUCERS could end up None when only
        # 'num_values' was given, crashing resize() below.
        N_PRODUCERS = self._num_producers if self._num_producers else 16
        DP_TIMEOUT = 1

        hashes = []

        # Generating considerable amount of values of size 1KB.
        VALUE_SIZE = 1024
        NUM_VALUES = self._num_values if self._num_values else 50
        values = [Value(random_str_val(size=VALUE_SIZE).encode()) for _ in range(NUM_VALUES)]

        bootstrap.resize(N_PRODUCERS+2)
        consumer = bootstrap.get(N_PRODUCERS+1)
        producers = (bootstrap.get(n) for n in range(1,N_PRODUCERS+1))
        for p in producers:
            hashes.append(random_hash())
            self._dhtPut(p, hashes[-1], *values)

        once = True
        while self._keep_alive or once:
            nodes = set([])
            self._gottaGetThemAllPokeNodes(consumer, hashes, nodes=nodes)

            DhtNetwork.Log.log("Values are found on:")
            for n in nodes:
                DhtNetwork.Log.log(n)

            DhtNetwork.Log.log("Creating 8 nodes around all of these hashes...")
            for _hash in hashes:
                self._trigger_dp(trigger_nodes, _hash, count=8)

            DhtNetwork.Log.log('Waiting', DP_TIMEOUT+1, 'minutes for normal storage maintenance.')
            time.sleep((DP_TIMEOUT+1)*60)

            DhtNetwork.Log.log('Deleting old nodes from previous search.')
            for proc in self._workbench.procs:
                DhtNetwork.Log.log('[REMOVE]: sending delete request to', proc)
                proc.sendClusterRequest(
                    DhtNetworkSubProcess.REMOVE_NODE_REQ,
                    nodes)

            # new consumer (fresh cache)
            bootstrap.resize(N_PRODUCERS+1)
            bootstrap.resize(N_PRODUCERS+2)
            consumer = bootstrap.get(N_PRODUCERS+1)

            nodes_after_time = set([])
            self._gottaGetThemAllPokeNodes(consumer, hashes, nodes=nodes_after_time)
            self._result(values, nodes_after_time - nodes)

            once = False
+
+
class PerformanceTest(DhtFeatureTest):
    """
    Tests for general performance of dht operations.
    """

    def __init__(self, test, workbench, opts):
        """
        @param test: is one of the following:
                     - 'gets': multiple get operations and statistical results.
                     - 'delete': perform multiple put() operations followed
                       by targeted deletion of nodes hosting the values. Doing
                       so until half of the nodes on the network remain.
        @type test: string
        """
        super(PerformanceTest, self).__init__(test, workbench)

    def run(self):
        """Dispatches to the requested test; always shrinks the bootstrap
        cluster back to one node afterwards."""
        try:
            if self._test == 'gets':
                self._getsTimesTest()
            elif self._test == 'delete':
                self._delete()
            else:
                raise NameError("This test is not defined '" + self._test + "'")
        except Exception as e:
            traceback.print_tb(e.__traceback__)
            print(type(e).__name__+':', e, file=sys.stderr)
        finally:
            self._bootstrap.resize(1)


    ###########
    #  Tests  #
    ###########

    @reset_before_test
    def _getsTimesTest(self):
        """
        Tests for performance of the DHT doing multiple get() operation.
        """
        bootstrap = self._bootstrap

        plt.ion()

        # Two stacked panes: raw get() latencies (top) and their histogram.
        fig, axes = plt.subplots(2, 1)
        fig.tight_layout()

        lax = axes[0]
        hax = axes[1]

        lines = None#ax.plot([])
        #plt.ylabel('time (s)')
        hax.set_ylim(0, 2)

        # let the network stabilise
        plt.pause(20)

        #start = time.time()
        times = []

        lock = threading.Condition()
        done = 0

        def getcb(v):
            # Value callback: just log; keep fetching.
            nonlocal bootstrap
            DhtNetwork.Log.log("found", v)
            return True

        def donecb(ok, nodes, start):
            # Completion callback: records elapsed time and wakes the waiter.
            nonlocal bootstrap, lock, done, times
            t = time.time()-start
            with lock:
                if not ok:
                    DhtNetwork.Log.log("failed !")
                times.append(t)
                done -= 1
                lock.notify()

        def update_plot():
            # Redraws both panes from the accumulated timings.
            nonlocal lines
            while lines:
                l = lines.pop()
                l.remove()
                del l
            if len(times) > 1:
                # BUG FIX: the 'normed' kwarg was removed from matplotlib
                # (3.1+); 'density' is the supported equivalent.
                n, bins, lines = hax.hist(times, 100, density=True, histtype='stepfilled', color='g')
                hax.set_ylim(min(n), max(n))
            lines.extend(lax.plot(times, color='blue'))
            plt.draw()

        def run_get():
            # Launches one asynchronous get() on a random hash, timing it.
            nonlocal done
            done += 1
            start = time.time()
            bootstrap.front().get(InfoHash.getRandom(), getcb, lambda ok, nodes: donecb(ok, nodes, start))

        plt.pause(5)

        plt.show()
        update_plot()

        # Discard warm-up data; measure from here on.
        times = []
        for n in range(10):
            self._workbench.replace_cluster()
            plt.pause(2)
            DhtNetwork.Log.log("Getting 50 random hashes succesively.")
            for i in range(50):
                with lock:
                    for _ in range(1):
                        run_get()
                    while done > 0:
                        lock.wait()
                        update_plot()
                        plt.pause(.1)
            update_plot()
            print("Took", np.sum(times), "mean", np.mean(times), "std", np.std(times), "min", np.min(times), "max", np.max(times))

        print('GET calls timings benchmark test : DONE. ' \
                'Close Matplotlib window for terminating the program.')
        plt.ioff()
        plt.show()

    @reset_before_test
    def _delete(self):
        """
        Tests for performance of get() and put() operations on the network while
        deleting around the target hash.
        """

        bootstrap = self._bootstrap

        bootstrap.resize(3)
        consumer = bootstrap.get(1)
        producer = bootstrap.get(2)

        myhash = random_hash()
        local_values = [Value(b'foo'), Value(b'bar'), Value(b'foobar')]

        for _ in range(max(1, int(self._workbench.node_num/32))):
            self._dhtGet(consumer, myhash)
            DhtNetwork.Log.log("Waiting 15 seconds...")
            time.sleep(15)

            self._dhtPut(producer, myhash, *local_values)

            #checking if values were transfered
            self._dhtGet(consumer, myhash)
            DhtNetwork.Log.log('Values are found on :')
            for node in DhtFeatureTest.foreignNodes:
                DhtNetwork.Log.log(node)

            if not DhtFeatureTest.successfullTransfer(local_values, DhtFeatureTest.foreignValues):
                if DhtFeatureTest.foreignValues:
                    DhtNetwork.Log.log('[GET]: Only ', len(DhtFeatureTest.foreignValues) ,' on ',
                            len(local_values), ' values successfully put.')
                else:
                    DhtNetwork.Log.log('[GET]: 0 values successfully put')

            DhtNetwork.Log.log('Removing all nodes hosting target values...')
            for proc in self._workbench.procs:
                DhtNetwork.Log.log('[REMOVE]: sending shutdown request to', proc)
                proc.sendClusterRequest(
                        DhtNetworkSubProcess.SHUTDOWN_NODE_REQ,
                        DhtFeatureTest.foreignNodes
                )
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2016 Savoir-faire Linux Inc.
+# Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <http://www.gnu.org/licenses/>.
+
+import argparse, subprocess
+
+from pyroute2 import IPDB, NetNS
+from pyroute2.netns.process.proxy import NSPopen
+
if __name__ == "__main__":
    # Builds (or tears down, with -r) a virtual network topology for DHT
    # testing: one network namespace per node, each connected through a veth
    # pair to a shared bridge, with netem-simulated loss/latency.
    # NOTE(review): requires root privileges and the pyroute2 package;
    # 'tc' must be available inside the namespaces.
    parser = argparse.ArgumentParser(description='Creates a virtual network topology for testing')
    parser.add_argument('-i', '--ifname', help='interface name', default='ethdht')
    parser.add_argument('-n', '--ifnum', type=int, help='number of isolated interfaces to create', default=1)
    parser.add_argument('-r', '--remove', help='remove instead of adding network interfaces', action="store_true")
    parser.add_argument('-l', '--loss', help='simulated packet loss (percent)', type=int, default=0)
    parser.add_argument('-d', '--delay', help='simulated latency (ms)', type=int, default=0)
    parser.add_argument('-4', '--ipv4', help='Enable IPv4', action="store_true")
    parser.add_argument('-6', '--ipv6', help='Enable IPv6', action="store_true")

    args = parser.parse_args()

    # Address prefixes for the bridge and namespace interfaces.
    # (2001:db9::/64 is outside the 2001:db8::/32 documentation range —
    # presumably intentional to avoid collisions; verify.)
    local_addr4 = '10.0.42.'
    local_addr6 = '2001:db9::'
    # NOTE(review): 'brige_name' is a typo for 'bridge_name' — kept as-is.
    brige_name = 'br'+args.ifname

    ip = None
    try:
        ip = IPDB()
        if args.remove:
            # cleanup interfaces
            for ifn in range(args.ifnum):
                iface = args.ifname+str(ifn)
                if iface in ip.interfaces:
                    with ip.interfaces[iface] as i:
                        i.remove()
            if 'tap'+args.ifname in ip.interfaces:
                with ip.interfaces['tap'+args.ifname] as i:
                    i.remove()
            if brige_name in ip.interfaces:
                with ip.interfaces[brige_name] as i:
                    i.remove()
            # Remove the per-node network namespaces.
            for ifn in range(args.ifnum):
                netns = NetNS('node'+str(ifn))
                netns.close()
                netns.remove()
        else:
            # Create one veth pair per node; the '.1' peer end will later be
            # moved into the node's namespace.
            for ifn in range(args.ifnum):
                iface = args.ifname+str(ifn)
                if not iface in ip.interfaces:
                    ip.create(kind='veth', ifname=iface, peer=iface+'.1').commit()

            # Tap device for host-side traffic capture/injection.
            ip.create(kind='tuntap', ifname='tap'+args.ifname, mode='tap').commit()

            # Bridge connecting all veth host ends and the tap device; it
            # carries the host-side addresses (…1/24, …1/64).
            with ip.create(kind='bridge', ifname=brige_name) as i:
                for ifn in range(args.ifnum):
                    iface = args.ifname+str(ifn)
                    i.add_port(ip.interfaces[iface])
                i.add_port(ip.interfaces['tap'+args.ifname])
                if args.ipv4:
                    i.add_ip(local_addr4+'1/24')
                if args.ipv6:
                    i.add_ip(local_addr6+'1/64')
                i.up()

            with ip.interfaces['tap'+args.ifname] as tap:
                tap.up()

            for ifn in range(args.ifnum):
                iface = args.ifname+str(ifn)

                # Move the peer end of the veth into namespace 'node<ifn>'.
                nns = NetNS('node'+str(ifn))
                iface1 = iface+'.1'
                with ip.interfaces[iface1] as i:
                    i.net_ns_fd = nns.netns

                with ip.interfaces[iface] as i:
                    i.up()

                # Configure loopback and addresses inside the namespace
                # (addresses start at …8 to keep low host ids free).
                ip_ns = IPDB(nl=nns)
                try:
                    with ip_ns.interfaces.lo as lo:
                        lo.up()
                    with ip_ns.interfaces[iface1] as i:
                        if args.ipv4:
                            i.add_ip(local_addr4+str(ifn+8)+'/24')
                        if args.ipv6:
                            i.add_ip(local_addr6+str(ifn+8)+'/64')
                        i.up()
                finally:
                    ip_ns.release()

                # Apply netem inside the namespace: delay with +/- jitter
                # (half the delay) and correlated packet loss (25%).
                nsp = NSPopen(nns.netns, ["tc", "qdisc", "add", "dev", iface1, "root", "netem", "delay", str(args.delay)+"ms", str(int(args.delay/2))+"ms", "loss", str(args.loss)+"%", "25%"], stdout=subprocess.PIPE)
                #print(nsp.communicate()[0].decode())
                nsp.communicate()
                nsp.wait()
                nsp.release()

            # Enable forwarding on the bridge so namespaces can reach out.
            if args.ipv4:
                subprocess.call(["sysctl", "-w", "net.ipv4.conf."+brige_name+".forwarding=1"])
            if args.ipv6:
                subprocess.call(["sysctl", "-w", "net.ipv6.conf."+brige_name+".forwarding=1"])

    except Exception as e:
        print('Error',e)
    finally:
        # Always release the IPDB handle, even on failure.
        if ip:
            ip.release()
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (C) 2015-2017 Savoir-faire Linux Inc.
+# Author(s): Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <http://www.gnu.org/licenses/>.
+
+import os, sys, time, cmd
+import signal
+import argparse
+import logging
+import logging.handlers
+
+import opendht as dht
+
+logger = logging.getLogger('dhtcluster')
+
class NodeCluster(object):
    """
    Manages a group of local OpenDHT nodes: starting, stopping, resizing the
    cluster and aggregating per-node message statistics.
    """
    # Class-level defaults kept for backward compatibility; each instance
    # gets its own copies in __init__ (previously all instances shared and
    # mutated these class attributes).
    nodes = []
    node_uid = 0

    @staticmethod
    def run_node(ip4, ip6, p, bootstrap=None, is_bootstrap=False, logfile=None):
        """Starts one DhtRunner bound to (ip4, ip6, p), optionally
        bootstrapping it to (host, port).

        @return: ((ip4, ip6, port), runner, id) tuple.
        """
        logger.info("Running node on port %s with bootstrap %s", p, bootstrap)
        n = dht.DhtRunner()
        n.run(ipv4=ip4 if ip4 else "", ipv6=ip6 if ip6 else "", port=p, is_bootstrap=is_bootstrap)
        if logfile:
            n.enableFileLogging(logfile)
        if bootstrap:
            n.bootstrap(bootstrap[0], bootstrap[1])
        time.sleep(.01)
        # NOTE(review): 'id' here is the Python builtin, kept for tuple-shape
        # compatibility; no caller in this file reads the third element.
        return ((ip4, ip6, p), n, id)

    @staticmethod
    def find_ip(iface):
        """Returns the (ipv4, ipv6) addresses of iface, or wildcard addresses
        when no specific interface is requested."""
        if not iface or iface == 'any':
            return ('0.0.0.0','::0')
        # BUG FIX: netifaces was referenced but never imported at module
        # level, causing a NameError; import it lazily here since it is only
        # needed when a concrete interface name is given.
        import netifaces
        if_ip4 = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
        if_ip6 = netifaces.ifaddresses(iface)[netifaces.AF_INET6][0]['addr']
        return (if_ip4, if_ip6)

    def __init__(self, iface=None, ip4=None, ip6=None, port=4000, bootstrap=None, first_bootstrap=False, logfile=None):
        """
        @param iface: Network interface to bind to (None or 'any' = wildcard).
        @param ip4/ip6: Explicit addresses overriding interface discovery.
        @param port: First port to use; incremented for each node launched.
        @param bootstrap: Object with .hostname/.port of an external
                          bootstrap node, or None to self-bootstrap.
        @param first_bootstrap: Start a dedicated bootstrap node first.
        @param logfile: Per-node log file prefix, or None to disable logging.
        """
        NodeCluster.iface = iface
        # Per-instance state (see class-attribute note above).
        self.nodes = []
        self.node_uid = 0
        self.port = port
        ips = NodeCluster.find_ip(iface)
        self.logfile = logfile
        self.ip4 = ip4 if ip4 else ips[0]
        self.ip6 = ip6 if ip6 else ips[1]
        self.bootstrap = bootstrap
        if bootstrap:
            self.bootstrap = (bootstrap.hostname, str(bootstrap.port) if bootstrap.port else "4222")
        else:
            logger.info("Using fallback bootstrap %s %s", self.ip4, self.port)
            self.bootstrap = ((self.ip4, str(self.port)))
            if first_bootstrap:
                logger.info("Starting bootstrap node")
                self.nodes.append(NodeCluster.run_node(self.ip4, self.ip6, self.port, self.bootstrap, is_bootstrap=True))
                self.bootstrap = ((self.ip4, str(self.port)))
                self.port += 1
        #print(self.ip4, self.ip6, self.port)

    def front(self):
        """Returns the first node's runner, or None if the cluster is empty."""
        return self.nodes[0][1] if self.nodes else None

    def get(self, i):
        """Returns the i-th node's runner, or None when out of range."""
        if not self.nodes or i < 0 or i >= len(self.nodes):
            return None
        return self.nodes[i][1]

    def getNodeInfoById(self, id=None):
        """Returns the node tuple whose runner has the given node id."""
        if id:
            for n in self.nodes:
                if n[1].getNodeId() == id:
                    return n
        return None

    def launch_node(self):
        """Starts one more node on the next port and returns its tuple."""
        node_logfile = (self.logfile + str(self.node_uid) + '.log') if self.logfile else None
        n = NodeCluster.run_node(self.ip4, self.ip6, self.port, self.bootstrap, logfile=node_logfile)
        self.nodes.append(n)
        self.port += 1
        self.node_uid += 1
        return n

    def end_node(self):
        """Stops the most recently launched node; returns True on success."""
        if not self.nodes:
            return
        else:
            n = self.nodes.pop()
            n[1].join()
            return True

    def resize(self, n):
        """Grows or shrinks the cluster to n nodes (capped at 500)."""
        n = min(n, 500)
        l = len(self.nodes)
        if n == l:
            return
        if n > l:
            logger.info("Launching %d nodes bound on IPv4 %s IPv6 %s", n-l, self.ip4, self.ip6)
            for i in range(l, n):
                self.launch_node()
        else:
            logger.info("Ending %d nodes", l-n)
            for i in range(n, l):
                self.end_node()

    def close(self):
        """Stops every node in the cluster."""
        self.resize(0)

    def getMessageStats(self):
        """Aggregates message statistics across all nodes.

        @return: [node_count, s0..s4] where sN is the elementwise sum of
                 each node's getNodeMessageStats().
        """
        # BUG FIX: this previously used numpy through an undefined name 'np'
        # (numpy was never imported in this file); plain elementwise sums
        # produce the same list of ints without the dependency.
        totals = [0, 0, 0, 0, 0]
        for entry in self.nodes:
            totals = [t + s for t, s in zip(totals, entry[1].getNodeMessageStats())]
        return [len(self.nodes)] + totals
+
class ClusterShell(cmd.Cmd):
    """Interactive shell controlling a NodeCluster."""
    intro = 'Welcome to the OpenDHT node cluster control. Type help or ? to list commands.\n'
    prompt = '>> '
    net = None    # the NodeCluster under control
    node = None   # currently selected node runner, or None
    log = False   # whether logging is enabled on the selected node

    def __init__(self, network):
        super(ClusterShell, self).__init__()
        self.net = network

    def setNode(self, node=None, num=0):
        """Select `node` (shown as number `num` in the prompt), moving log
        output from the previously selected node when needed."""
        if node == self.node:
            return
        if self.node:
            self.node.disableLogging()
        self.node = node
        if self.node:
            self.prompt = '('+str(num)+') >> '
            if self.log:
                self.node.enableLogging()
        else:
            self.prompt = '>> '

    def do_exit(self, arg):
        """Shut the cluster down and leave the shell."""
        self.close()
        return True

    def do_node(self, arg):
        """Select node N (1-based); with no argument, deselect."""
        if not arg:
            self.setNode()
            return
        try:
            nodenum = int(arg)
        except ValueError:
            # BUGFIX: a non-numeric argument used to raise and kill the shell.
            print("Invalid node number:", arg)
            return
        node = self.net.get(nodenum-1)
        if not node:
            print("Invalid node number:", nodenum, " (accepted: 1-", len(self.net.nodes), ")")
        else:
            self.setNode(node, nodenum)

    def do_resize(self, arg):
        """Resize the cluster to N nodes."""
        if not arg:
            return
        try:
            nodenum = int(arg)
            self.net.resize(nodenum)
        except Exception as e:
            print("Can't resize:", e)

    def do_ll(self, arg):
        """Show the selected node's id, or the cluster size."""
        if self.node:
            print('Node', self.node.getNodeId().decode())
        else:
            print(len(self.net.nodes), 'nodes running.')

    def do_ls(self, arg):
        """Dump the selected node's searches log."""
        if self.node:
            print(self.node.getSearchesLog(0))
        else:
            print('No node selected.')

    def do_log(self, arg):
        """Toggle log output for the selected node."""
        if self.node:
            self.log = not self.log
            if self.log:
                self.node.enableLogging()
            else:
                self.node.disableLogging()

    def do_EOF(self, line):
        """Ctrl-D behaves like exit."""
        self.close()
        return True

    def close(self):
        """Release and shut down the cluster (idempotent)."""
        if self.net:
            self.net.close()
            self.net = None
+
if __name__ == '__main__':
    # NOTE(review): argparse is already imported at module level; this
    # re-import is redundant but harmless.
    import argparse
    from urllib.parse import urlparse
    net = None
    run = True
    def clean_quit():
        # Idempotent shutdown: stop all nodes exactly once.
        global net, run
        if run:
            run = False
            if net:
                net.resize(0)
                net = None
    def quit_signal(signum, frame):
        # Signal handler wrapper around clean_quit().
        clean_quit()

    try:
        parser = argparse.ArgumentParser(description='Create a dht network of -n nodes')
        parser.add_argument('-n', '--node-num', help='number of dht nodes to run', type=int, default=32)
        parser.add_argument('-I', '--iface', help='local interface to bind', default='any')
        parser.add_argument('-p', '--port', help='start of port range (port, port+node_num)', type=int, default=4000)
        parser.add_argument('-b', '--bootstrap', help='bootstrap address')
        group = parser.add_mutually_exclusive_group()
        group.add_argument('-d', '--daemonize', help='daemonize process', action='store_true')
        group.add_argument('-s', '--service', help='service mode (not forking)', action='store_true')
        parser.add_argument('-l', '--log', help='log file prefix')
        args = parser.parse_args()

        if args.bootstrap:
            # Parse "host[:port]" by giving it a scheme so urlparse splits it.
            args.bootstrap = urlparse('dht://'+args.bootstrap)

        # setup logging: syslog when running unattended, stdout otherwise
        if args.daemonize or args.service:
            syslog = logging.handlers.SysLogHandler(address = '/dev/log')
            syslog.setLevel(logging.DEBUG)
            logger.setLevel(logging.DEBUG)
            logger.addHandler(syslog)
        else:
            logging.basicConfig(stream=sys.stdout,level=logging.DEBUG)

        # start cluster (nodes are launched below via resize)
        net = NodeCluster(iface=args.iface, port=args.port, bootstrap=args.bootstrap, logfile=args.log)

        # main loop
        if args.daemonize:
            import daemon
            context = daemon.DaemonContext()
            # 'terminate' is resolved by python-daemon to its built-in
            # terminate handler; the others call our clean shutdown.
            context.signal_map = {
                signal.SIGHUP: 'terminate',
                signal.SIGTERM: quit_signal,
                signal.SIGINT: quit_signal,
                signal.SIGQUIT: quit_signal
            }
            with context:
                net.resize(args.node_num)
                while net:
                    time.sleep(1)
        elif args.service:
            signal.signal(signal.SIGTERM, quit_signal)
            signal.signal(signal.SIGINT, quit_signal)
            signal.signal(signal.SIGQUIT, quit_signal)
            net.resize(args.node_num)
            while net:
                time.sleep(1)
        else:
            # Interactive mode: hand control to the shell.
            net.resize(args.node_num)
            ClusterShell(net).cmdloop()
    except Exception:
        logger.error("Exception ", exc_info=1)
    finally:
        clean_quit()
        logger.warning("Ending node cluster")
        for handler in logger.handlers:
            handler.close()
            logger.removeHandler(handler)
--- /dev/null
+#!/usr/bin/env python3
+# Copyright (c) 2016 Savoir-faire Linux Inc.
+# Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <http://www.gnu.org/licenses/>.
+
+from twisted.web import server, resource
+from twisted.internet import reactor, endpoints
+from urllib.parse import urlparse
+
+import opendht as dht
+import base64, json
+
class DhtServer(resource.Resource):
    """Twisted web resource exposing a DHT node over HTTP.

    GET  /<key>?id=..&user_type=..  -> JSON map of values found at <key>
    POST /<key> (data= or base64=)  -> store a value at <key>
    A 40-character key is used as a raw InfoHash; anything else is hashed.
    """
    isLeaf = True

    def __init__(self, port, bootstrap):
        # BUGFIX: the DhtRunner used to be a class attribute, created at
        # import time and shared by all instances; create it per-instance.
        self.node = dht.DhtRunner()
        self.node.run(port=port)
        b_url = urlparse('//'+bootstrap)
        self.node.bootstrap(b_url.hostname, str(b_url.port) if b_url.port else '4222')

    def render_GET(self, req):
        """Look up a key and return the found values as JSON (base64 data)."""
        uri = req.uri[1:].decode().rsplit('?', 1)[0]
        h = dht.InfoHash(uri.encode()) if len(uri) == 40 else dht.InfoHash.get(uri)
        # Build a WHERE clause from whitelisted query parameters; the [:-1]
        # strips the trailing comma. NOTE(review): with no matching parameter
        # this yields "WHERE " — presumably dht.Where tolerates it; confirm.
        w = dht.Where('WHERE '+''.join(k.decode()+'='+req.args[k][0].decode()+','
                                       for k in req.args.keys()
                                       if k in [b'id', b'user_type', b'value_type', b'owner', b'seq'])[:-1])
        print('GET', '"'+uri+'"', h, w)
        res = self.node.get(h, where=w)
        req.setHeader(b"content-type", b"application/json")
        return json.dumps({'{:x}'.format(v.id):{'base64':base64.b64encode(v.data).decode()} for v in res}).encode()

    def render_POST(self, req):
        """Store posted data (raw `data` or `base64` parameter) at the key."""
        uri = req.uri[1:]
        data = req.args[b'data'][0] if b'data' in req.args else None
        user_type = req.args[b'user_type'][0].decode() if b'user_type' in req.args else ""
        try:
            vid = int(req.args[b'id'][0].decode()) if b'id' in req.args else 0
        except ValueError:
            vid = 0
        if not data and b'base64' in req.args:
            data = base64.b64decode(req.args[b'base64'][0])
        h = dht.InfoHash(uri) if len(uri) == 40 else dht.InfoHash.get(uri.decode())
        print('POST', h, data)
        req.setHeader(b"content-type", b"application/json")
        if data:
            v = dht.Value(data)
            if vid != 0:
                v.id = vid
            v.user_type = user_type
            self.node.put(h, v)
            return json.dumps({'success':True}).encode()
        else:
            req.setResponseCode(400)
            return json.dumps({'success':False, 'error':'no data parameter'}).encode()
+
+
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Launch an OpenDHT node with an HTTP control interface')
    parser.add_argument('-p', '--port', help='OpenDHT port to bind', type=int, default=4222)
    parser.add_argument('-hp', '--http-port', help='HTTP port to bind', type=int, default=8080)
    parser.add_argument('-b', '--bootstrap', help='bootstrap address', default="bootstrap.ring.cx:4222")
    args = parser.parse_args()
    # Serve the DHT resource over plain HTTP and run the twisted reactor.
    endpoints.serverFromString(reactor, "tcp:"+str(args.http_port)).listen(server.Site(DhtServer(args.port, args.bootstrap)))
    reactor.run()
--- /dev/null
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 Savoir-faire Linux Inc.
+# Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <http://www.gnu.org/licenses/>.
+
+import time
+import argparse
+import time
+import asyncio
+from datetime import datetime
+
+import opendht as dht
+
parser = argparse.ArgumentParser(description='Create a dht network of -n nodes')
parser.add_argument('-b', '--bootstrap', help='bootstrap address', default='bootstrap.ring.cx')
parser.add_argument('-n', '--num-ops', help='number of concurrent operations on the DHT', type=int, default=8)
parser.add_argument('-p', '--period', help='duration between each test (seconds)', type=int, default=60)
parser.add_argument('-t', '--timeout', help='timeout for a test to complete (seconds)', type=float, default=15)
args = parser.parse_args()

# Two local nodes: node1 listens, node2 puts; both join the public network.
node1 = dht.DhtRunner()
node1.run()

node2 = dht.DhtRunner()
node2.run()

node1.bootstrap(args.bootstrap)
node2.bootstrap(args.bootstrap)
loop = asyncio.get_event_loop()

# pending_tests maps str(key) -> {'v': value we put, 'c': counter}; one test
# round completes when every entry has been observed back by the listener.
pending_tests = {}
keys = [dht.InfoHash.getRandom() for _ in range(args.num_ops)]
+
def listen_cb(key, val):
    """Listener callback: clear the pending test for `key` once the value we
    put is seen coming back. Always returns True so the listen stays alive."""
    kstr = str(key)
    if kstr in pending_tests:
        if pending_tests[kstr]['v'].id == val.id:
            pending_tests.pop(kstr, None)
        else:
            # BUGFIX: "expected" and "got" were swapped in the message —
            # the expected id is the one we put (pending), the received
            # one is val.id.
            print("Expected vid", pending_tests[kstr]['v'].id, "got", val.id)
    return True
+
def listen(key):
    """Attach a listener for `key` on node1, dispatching each received value
    to listen_cb on the asyncio loop thread."""
    def on_value(v):
        return loop.call_soon_threadsafe(listen_cb, key, v)
    node1.listen(key, on_value)
+
for key in keys:
    listen(key)

next_test = time.time()
while True:
    start = time.time()
    #print(datetime.fromtimestamp(start).strftime('%Y-%m-%d %H:%M:%S'), 'Test started')
    # Put one random value per key; the listeners remove entries from
    # pending_tests as the values are observed coming back.
    for key in keys:
        val = dht.Value(str(dht.InfoHash.getRandom()).encode())
        pending_tests[str(key)] = {'v':val, 'c':0}
        node2.put(key, val, lambda ok, nodes: ok)
    while len(pending_tests):
        # stop() + run_forever() drains the callbacks queued with
        # call_soon_threadsafe, then returns control here.
        loop.stop()
        loop.run_forever()
        time.sleep(1)
        if time.time()-start > args.timeout:
            print('Test timeout !')
            exit(1)

    end = time.time()
    print(datetime.fromtimestamp(end).strftime('%Y-%m-%d %H:%M:%S'),
          'Test completed successfully in', end-start)
    # Sleep out the remainder of the period before the next round.
    next_test += args.period
    if next_test > end:
        time.sleep(next_test-end)
--- /dev/null
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 Savoir-faire Linux Inc.
+# Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <https://www.gnu.org/licenses/>.
+
+import opendht as dht
+import time
+import asyncio
+
# Two local nodes that bounce values back and forth MAX times.
ping_node = dht.DhtRunner()
ping_node.run()
#ping_node.enableLogging()
#ping_node.bootstrap("bootstrap.ring.cx", "4222")

pong_node = dht.DhtRunner()
pong_node.run()
#pong_node.enableLogging()
# Introduce the nodes to each other directly (no public bootstrap).
pong_node.ping(ping_node.getBound());

# Both locations hash to the same key: the "ping" and "pong" channels.
loc_ping = dht.InfoHash.get("toto99")
loc_pong = dht.InfoHash.get(str(loc_ping))

i = 0          # round counter, incremented by ping()
MAX = 2048     # total number of rounds before the loop is stopped

loop = asyncio.get_event_loop()
+
def done(h, ok):
    # put() completion callback: h is the sender's node id, ok the result.
    print(h, "over", ok)
+
def ping(node, h):
    """Play one round: after a short pause, put a value on `h` (waking the
    other side's listener); stop the loop once MAX rounds are reached."""
    global i
    time.sleep(0.0075)
    i += 1
    if i >= MAX:
        loop.stop()
        return
    node.put(h, dht.Value(b"hey"), lambda ok, nodes: done(node.getNodeId().decode(), ok))
+
def pong(node, h):
    # Listen callback: report the ping and schedule the reply on the asyncio
    # loop thread. Returning True keeps the listen operation alive.
    print(node.getNodeId().decode(), "got ping", h, i)
    loop.call_soon_threadsafe(ping, node, h)
    return True
+
# Cross-wire the listeners: a value seen on one channel triggers a reply on
# the other. NOTE(review): the node/hash pairing passed to pong() looks
# swapped relative to the listening node — presumably intentional (the reply
# is sent by the *other* node), but confirm.
ping_node.listen(loc_ping, lambda v: pong(pong_node, loc_pong))
pong_node.listen(loc_pong, lambda v: pong(ping_node, loc_ping))

# Kick off the first round, then run the loop until MAX rounds complete.
ping(pong_node, loc_ping)

loop.run_forever()
--- /dev/null
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 Savoir-faire Linux Inc.
+# Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; If not, see <http://www.gnu.org/licenses/>.
+
+import time, sys, os
+from pprint import pprint
+from math import cos, sin, pi
+import urllib3
+import gzip
+import queue
+
+sys.path.append('..')
+from opendht import *
+
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.patches as mpatches
+from matplotlib.colors import colorConverter
+from matplotlib.collections import RegularPolyCollection
+from matplotlib.widgets import Button
+from mpl_toolkits.basemap import Basemap
+
+import GeoIP
+
http = urllib3.PoolManager()

# Global crawl state, shared by the step/UI callbacks below.
run = True          # main-loop flag, cleared by the Exit button
done = 0            # number of in-flight crawl steps
all_nodes = NodeSet()
nodes_ip4s = {}     # ip -> [node, count, georecord]
nodes_ip6s = {}
lats = []
lons = []
cities=[]
xys = []            # node positions on the unit circle (ring plot)
colors = []         # per-node color: red = expired, blue = live
all_lines = []
points = []
not_found = []

plt.ion()
plt.figaspect(2.)

fig, axes = plt.subplots(2, 1)
fig.set_size_inches(8,16,forward=True)
fig.tight_layout()
# NOTE(review): set_window_title moved to fig.canvas.manager in newer
# matplotlib releases — confirm against the targeted version.
fig.canvas.set_window_title('OpenDHT scanner')

mpx = axes[0]
mpx.set_title("Node GeoIP")

# World map (Robinson projection) for geolocated nodes.
m = Basemap(projection='robin', resolution = 'l', area_thresh = 1000.0, lat_0=0, lon_0=0, ax=mpx)
m.fillcontinents(color='#cccccc',lake_color='white')
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
m.drawmapboundary(fill_color='white')
plt.show()

# Ring plot: each node id mapped to an angle on the unit circle.
ringx = axes[1]
ringx.set_title("Node IDs")
ringx.set_autoscale_on(False)
ringx.set_aspect('equal', 'datalim')
ringx.set_xlim(-2.,2.)
ringx.set_ylim(-1.5,1.5)

exitax = plt.axes([0.92, 0.95, 0.07, 0.04])
exitbtn = Button(exitax, 'Exit')
reloadax = plt.axes([0.92, 0.90, 0.07, 0.04])
button = Button(reloadax, 'Reload')

collection = None
infos = [ringx.text(1.2, -0.8, ""),
         ringx.text(1.2, -0.9, "")]
+
def exitcb(arg):
    # 'Exit' button callback: stop the main loop.
    global run
    run = False
exitbtn.on_clicked(exitcb)
+
def check_dl(fname, url):
    """Download `url` (a gzipped file) and store it decompressed as `fname`,
    unless `fname` already exists."""
    if os.path.isfile(fname):
        return
    print('downloading', url)
    import shutil
    resp = http.request('GET', url, headers={'User-Agent': 'Mozilla/5.0'}, preload_content=False)
    try:
        # BUGFIX: the gzip stream and output file were never closed and the
        # HTTP connection never released; use context managers + copyfileobj.
        with gzip.GzipFile(fileobj=resp) as ghandle, open(fname, 'wb') as out:
            shutil.copyfileobj(ghandle, out)
    finally:
        resp.release_conn()
+
# Fetch the legacy MaxMind GeoLite databases used to geolocate nodes.
# NOTE(review): geolite.maxmind.com has been discontinued; these URLs
# presumably no longer resolve — confirm and migrate to GeoLite2 if needed.
check_dl("GeoLiteCity.dat", "http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz")
check_dl("GeoLiteCityv6.dat", "http://geolite.maxmind.com/download/geoip/database/GeoLiteCityv6-beta/GeoLiteCityv6.dat.gz")

gi = GeoIP.open("GeoLiteCity.dat", GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
gi6 = GeoIP.open("GeoLiteCityv6.dat", GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
+
def gcb(v):
    """'get' value callback: accept every value so the query keeps running."""
    return True
+
# Local DHT node used to crawl the network.
r = DhtRunner()
r.run(port=4112)
r.bootstrap("bootstrap.ring.cx", "4222")

plt.pause(1)

# UI tasks posted from DHT callback threads; drained by handle_tasks().
q = queue.Queue()
+
def step(cur_h, cur_depth):
    """Query the DHT around prefix cur_h at tree depth cur_depth, queueing a
    UI task to draw the in-progress wedge. `done` counts in-flight steps."""
    global done
    done += 1
    # arc/lines are filled in by stepUi and cleaned up by nextstepUi.
    # (Removed two dead locals a/b — the angles are recomputed in stepUi.)
    arc = []
    lines = []
    q.put((stepUi, (cur_h, cur_depth, arc, lines)))
    print("step", cur_h, cur_depth)
    r.get(cur_h, gcb, lambda d, nodes: stepdone(cur_h, cur_depth, d, nodes, arc, lines))
+
def stepdone(cur_h, cur_depth, d, nodes, arc, lines):
    # Completion callback of a step's get(): queue the UI cleanup, then
    # decide whether to recurse into deeper prefixes.
    #print("stepdone", cur_h, cur_depth)
    q.put((nextstepUi, (nodes, arc, lines)))
    nextstep(cur_h, cur_depth, d, nodes)
+
def nextstep(cur_h, cur_depth, ok, nodes):
    # Decide whether to subdivide this keyspace region further: the number
    # of common prefix bits between the closest found nodes estimates the
    # local node density. Depth is capped at 8.
    global done
    if nodes:
        commonBits = 0
        if len(nodes) > 1:
            snodes = NodeSet()
            snodes.extend(nodes)
            commonBits = InfoHash.commonBits(snodes.first(), snodes.last())
        depth = min(8, commonBits+6)
        if cur_depth < depth:
            # Spawn one child step per additional prefix bit.
            for b in range(cur_depth, depth):
                new_h = InfoHash(cur_h.toString());
                new_h.setBit(b, 1);
                step(new_h, b+1);
    else:
        print("step done with no nodes", ok, cur_h.toString().decode())
    # This step is complete (children, if any, were counted by step()).
    done -= 1
+
def stepUi(cur_h, cur_depth, arc, lines):
    # UI task: draw the wedge covering this step's keyspace region
    # (angle = hash mapped to [0, 2*pi)) and its leading guide line.
    global all_lines
    a = 2.*pi*cur_h.toFloat()
    b = a + 2.*pi/(2**(cur_depth))
    arc.append(ringx.add_patch(mpatches.Wedge([0.,0,], 1., a*180/pi, b*180/pi, fill=True, color="blue", alpha=0.5)))
    lines.extend(ringx.plot([0, cos(a)], [0, sin(a)], 'k-', lw=1.2))
    all_lines.extend(lines)
+
def nextstepUi(nodes, arc=None, lines=None):
    """UI task run when a step completes: remove its wedge, grey out its
    guide lines, and plot the newly discovered nodes.

    BUGFIX: `arc` defaulted to None (iterating it crashed when omitted) and
    `lines` was a shared mutable default list.
    """
    for a in (arc or []):
        if a:
            a.remove()
    for l in (lines or []):
        l.set_color('#aaaaaa')
    if nodes:
        appendNodes(nodes)
+
def appendNodes(nodes):
    # Insert each found node into the global set; draw those not seen before.
    global all_nodes
    for n in nodes:
        if all_nodes.insert(n):
            appendNewNode(n)
+
def appendNewNode(n):
    # Record one newly discovered node: ring position/color, per-IP
    # counters, and (when GeoIP knows it) a point on the world map.
    global nodes_ip4s, nodes_ip6s, colors, xys
    # Strip the port: keep everything before the last ':' of "ip:port".
    addr = b':'.join(n.getNode().getAddr().split(b':')[0:-1]).decode()
    colors.append('red' if n.getNode().isExpired() else 'blue')
    node_val = n.getId().toFloat()
    xys.append((cos(node_val*2*pi), sin(node_val*2*pi)))
    georecord = None
    if addr[0] == '[':
        # IPv6 literal: drop the surrounding brackets.
        addr = addr[1:-1]
        if addr in nodes_ip6s:
            nodes_ip6s[addr][1] += 1
        else:
            georecord = gi6.record_by_name_v6(addr)
            nodes_ip6s[addr] = [n, 1, georecord]
    else:
        if addr in nodes_ip4s:
            nodes_ip4s[addr][1] += 1
        else:
            georecord = gi.record_by_name(addr)
            nodes_ip4s[addr] = [n, 1, georecord]
    if georecord:
        appendMapPoint(georecord)
+
def appendMapPoint(res):
    """Record one GeoIP record in the global lon/lat/city plot lists; nodes
    with no city name are labeled by rounded coordinates."""
    global lons, lats, cities
    lat = res['latitude']
    lon = res['longitude']
    lons.append(lon)
    lats.append(lat)
    cities.append(res['city'] or (str(int(lat)) + '-' + str(int(lon))))
+
+
def restart(arg):
    # 'Reload' button callback: clear every plot artist and all crawl state,
    # then start a fresh crawl. Refuses to run while steps are in flight.
    global collection, all_lines, all_nodes, points, done, nodes_ip4s, nodes_ip6s, lats, lons, cities, xys, colors
    if done:
        return
    for l in all_lines:
        l.remove()
        del l
    all_lines = []
    all_nodes = NodeSet()
    nodes_ip4s = {}
    nodes_ip6s = {}
    lats = []
    lons = []
    cities=[]
    xys = []
    colors = []
    if collection:
        collection.remove()
        del collection
        collection = None
    for p in points:
        p.remove()
        del p
    points = []

    print(arg)
    # Restart the crawl from the top of the keyspace (high bit set).
    start_h = InfoHash()
    start_h.setBit(159, 1)
    step(start_h, 0)
    plt.draw()
+
def num_nodes(node_set):
    """Return the (ip, [node, count, georecord]) pairs of a per-IP dict,
    sorted by ascending node count."""
    return sorted(node_set.items(), key=lambda entry: entry[1][1])
+
def update_plot():
    # Redraw both plots from the global state: geolocated points + labels on
    # the map, and the polygon collection of node positions on the ring.
    #print("update_plot", done)
    global m, collection, points
    for p in points:
        p.remove()
        del p
    points = []
    x,y = m(lons,lats)
    points.extend(m.plot(x,y,'bo'))
    for name, xpt, ypt in zip(cities, x, y):
        points.append(mpx.text(xpt+50000, ypt+50000, name))
    if collection:
        collection.remove()
        del collection
        collection = None
    collection = ringx.add_collection(RegularPolyCollection(
        int(fig.dpi), 6, sizes=(10,), facecolors=colors,
        offsets = xys, transOffset = ringx.transData))

    infos[0].set_text("{} different IPv4s".format(len(nodes_ip4s)))
    infos[1].set_text("{} different IPv6s".format(len(nodes_ip6s)))
+
def d(arg):
    """No-op button callback, used to neutralize the reload button."""
    return None
+
def handle_tasks():
    """Drain the UI task queue (posted from DHT callback threads), then
    refresh the plots."""
    while True:
        try:
            func, fargs = q.get_nowait()
        except queue.Empty:
            # BUGFIX: the original bare `except Exception: break` also
            # swallowed errors raised by the tasks themselves (and skipped
            # task_done for them); only an empty queue ends the loop now.
            break
        try:
            func(*fargs)
        except Exception as e:
            print('UI task failed:', e)
        q.task_done()
    update_plot()
+
if run:
    # start first step: crawl from the top of the keyspace (high bit set)
    start_h = InfoHash()
    start_h.setBit(159, 1)
    step(start_h, 0)

while run:
    # Pump UI tasks while crawl steps are in flight.
    while run and done > 0:
        handle_tasks()
        plt.pause(.5)

    if not run:
        break

    # Crawl finished: enable the reload button and print a summary.
    button.on_clicked(restart)

    node_ip4s = num_nodes(nodes_ip4s)
    node_ip6s = num_nodes(nodes_ip6s)

    print(all_nodes.size(), " nodes found")
    print(all_nodes)
    #print(len(not_found), " nodes not geolocalized")
    #for n in not_found:
    #    print(n)
    print('')
    print(len(node_ip4s), " different IPv4s :")
    for ip in node_ip4s:
        print(ip[0], ":", str(ip[1][1]), "nodes", ("(" + ip[1][2]['city'] + ")") if ip[1][2] and ip[1][2]['city'] else "")
    print('')
    print(len(node_ip6s), " different IPv6s :")
    for ip in node_ip6s:
        print(ip[0], ":", str(ip[1][1]), "nodes", ("(" + ip[1][2]['city'] + ")") if ip[1][2] and ip[1][2]['city'] else "")

    # Idle until either Exit is pressed or Reload starts a new crawl.
    handle_tasks()
    while run and done == 0:
        plt.pause(.5)
    button.on_clicked(d)
    plt.draw()

all_nodes = []
r.join()
--- /dev/null
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg3492"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ xml:space="preserve"
+ width="256"
+ height="256"
+ viewBox="0 0 256 255.99999"
+ inkscape:export-xdpi="90"
+ inkscape:export-ydpi="90"><title
+ id="title3357">OpenDHT</title><metadata
+ id="metadata3498"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title>OpenDHT</dc:title><dc:rights><cc:Agent><dc:title></dc:title></cc:Agent></dc:rights><dc:creator><cc:Agent><dc:title>Savoir-faire Linux Inc.</dc:title></cc:Agent></dc:creator><dc:relation>https://opendht.net</dc:relation></cc:Work></rdf:RDF></metadata><defs
+ id="defs3496"><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3534"><path
+ d="m 0,0 1152,0 0,648 L 0,648 0,0 Z"
+ id="path3536"
+ inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1855"
+ inkscape:window-height="1056"
+ id="namedview3494"
+ showgrid="false"
+ units="px"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-bottom="5"
+ fit-margin-left="5"
+ inkscape:zoom="1.9555556"
+ inkscape:cx="63.161953"
+ inkscape:cy="141.16919"
+ inkscape:window-x="1985"
+ inkscape:window-y="24"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g3532" /><g
+ id="g3500"
+ inkscape:groupmode="layer"
+ inkscape:label="SFL-logo-DHT"
+ transform="matrix(1.25,0,0,-1.25,-589.00821,604.10631)"><g
+ id="g3530"><g
+ id="g3532"
+ clip-path="url(#clipPath3534)"><g
+ id="g3359"
+ transform="matrix(0.9931555,0,0,0.99058777,1.5489503,0.9813209)"><g
+ transform="translate(485.3798,383.7826)"
+ id="g3538"><path
+ inkscape:connector-curvature="0"
+ id="path3540"
+ style="fill:#47b3d1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 0,0 c 0,50.03 40.554,90.586 90.586,90.586 50.028,0 90.587,-40.556 90.587,-90.586 0,-50.03 -40.559,-90.588 -90.587,-90.588 C 40.554,-90.588 0,-50.03 0,0" /></g><g
+ transform="translate(576.0338,313.2273)"
+ id="g3542"><path
+ inkscape:connector-curvature="0"
+ id="path3544"
+ style="fill:#0091ba;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 0,0 c 0,54.307 -24.009,102.977 -61.954,136.075 -17.613,-16.526 -28.632,-39.996 -28.632,-66.056 0,-49.629 39.916,-89.912 89.393,-90.555 0.396,-0.009 0.792,-0.033 1.193,-0.033 L 0,-0.082 0,0 Z" /></g><g
+ transform="translate(575.9661,292.6626)"
+ id="g3546"><path
+ inkscape:connector-curvature="0"
+ id="path3548"
+ style="fill:#007aa3;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 0,0 c -1.439,49.482 -41.105,89.355 -90.504,91.12 0,-0.178 -0.015,-0.355 -0.015,-0.536 C -90.519,40.578 -49.998,0.039 0,0" /></g><g
+ transform="translate(532.4411,337.497)"
+ id="g3550"><path
+ inkscape:connector-curvature="0"
+ id="path3552"
+ style="fill:#006382;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 0,0 c -10.555,0 -20.236,-3.754 -27.777,-10 16.565,-21.182 42.472,-34.815 71.442,-34.834 0.01,0.415 -0.072,0.821 -0.072,1.241 C 43.593,-19.516 24.077,0 0,0" /></g><g
+ transform="translate(576.0338,313.2273)"
+ id="g3554"><path
+ inkscape:connector-curvature="0"
+ id="path3556"
+ style="fill:#0091ba;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="M 0,0 C 0,54.307 24.014,102.977 61.954,136.075 79.567,119.549 90.586,96.079 90.586,70.019 90.586,20.39 50.675,-19.893 1.198,-20.536 0.797,-20.545 0.406,-20.569 0.005,-20.569 l 0,20.487 C 0.005,-0.053 0,-0.029 0,0" /></g><g
+ transform="translate(576.1063,292.6626)"
+ id="g3558"><path
+ inkscape:connector-curvature="0"
+ id="path3560"
+ style="fill:#007aa3;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 0,0 c 1.44,49.482 41.105,89.355 90.504,91.12 0,-0.178 0.01,-0.355 0.01,-0.536 C 90.514,40.578 49.998,0.039 0,0" /></g><g
+ transform="translate(619.6266,337.497)"
+ id="g3562"><path
+ inkscape:connector-curvature="0"
+ id="path3564"
+ style="fill:#006382;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 0,0 c 10.56,0 20.241,-3.754 27.782,-10 -16.57,-21.182 -42.405,-34.815 -71.375,-34.834 -0.009,0.415 0,0.821 0,1.241 C -43.593,-19.516 -24.072,0 0,0" /></g></g></g></g></g></svg>
--- /dev/null
# Automake rules for the main OpenDHT library (libopendht.la).
lib_LTLIBRARIES = libopendht.la

# Compiler/linker flags come from configure-time pkg-config checks.
libopendht_la_CPPFLAGS = @CPPFLAGS@ -I$(top_srcdir)/include/opendht @Argon2_CFLAGS@ @JsonCpp_CFLAGS@ @MsgPack_CFLAGS@
libopendht_la_LIBADD = @Argon2_LIBS@ @JsonCpp_LIBS@ @GnuTLS_LIBS@ @Nettle_LIBS@
libopendht_la_LDFLAGS = @LDFLAGS@ @Argon2_LDFLAGS@
libopendht_la_SOURCES = \
        dht.cpp \
        storage.h \
        listener.h \
        request.h \
        search.h \
        value_cache.h \
        op_cache.h \
        op_cache.cpp \
        net.h \
        parsed_message.h \
        node_cache.cpp \
        callbacks.cpp \
        routing_table.cpp \
        network_engine.cpp \
        utils.cpp \
        infohash.cpp \
        node.cpp \
        value.cpp \
        crypto.cpp \
        securedht.cpp \
        dhtrunner.cpp \
        default_types.cpp \
        log.cpp

# Windows needs a dedicated random-number-generator shim.
if WIN32
libopendht_la_SOURCES += rng.cpp
endif

# Public headers, installed preserving the opendht/ sub-directory layout.
nobase_include_HEADERS = \
        ../include/opendht.h \
        ../include/opendht/def.h \
        ../include/opendht/dht.h \
        ../include/opendht/callbacks.h \
        ../include/opendht/node_cache.h \
        ../include/opendht/routing_table.h \
        ../include/opendht/network_engine.h \
        ../include/opendht/scheduler.h \
        ../include/opendht/rate_limiter.h \
        ../include/opendht/utils.h \
        ../include/opendht/sockaddr.h \
        ../include/opendht/infohash.h \
        ../include/opendht/node.h \
        ../include/opendht/value.h \
        ../include/opendht/crypto.h \
        ../include/opendht/securedht.h \
        ../include/opendht/dhtrunner.h \
        ../include/opendht/default_types.h \
        ../include/opendht/log.h \
        ../include/opendht/log_enable.h \
        ../include/opendht/rng.h

# Optional features, toggled by configure switches.
if ENABLE_PROXY_SERVER
libopendht_la_SOURCES += dht_proxy_server.cpp
nobase_include_HEADERS += ../include/opendht/dht_proxy_server.h
endif

if ENABLE_PROXY_CLIENT
libopendht_la_SOURCES += dht_proxy_client.cpp
nobase_include_HEADERS += ../include/opendht/dht_proxy_client.h ../include/opendht/dht_interface.h
endif

# base64 helpers are needed by both proxy modes.
libopendht_la_SOURCES += base64.h base64.cpp
if PROXY_CLIENT_OR_SERVER
nobase_include_HEADERS += ../include/opendht/proxy.h
endif

if ENABLE_INDEXATION
libopendht_la_SOURCES += indexation/pht.cpp
nobase_include_HEADERS += ../include/opendht/indexation/pht.h
endif

clean-local:
	rm -rf libargon2.la

######################
#  ARGON2 submodule  #
######################

# Convenience (non-installed) libtool archive built from the bundled
# argon2 git submodule, linked into libopendht when the system has no
# usable libargon2.
if WITH_INCLUDED_ARGON2
noinst_LTLIBRARIES = libargon2.la
libopendht_la_DEPENDENCIES = libargon2.la

libargon2_la_CFLAGS = -std=c89 -fPIC -pthread -O3 -Wall -I@top_builddir@/argon2/include -I@top_builddir@/argon2/src
libargon2_la_SOURCES = \
        @top_builddir@/argon2/src/argon2.c \
        @top_builddir@/argon2/src/core.c \
        @top_builddir@/argon2/src/blake2/blake2b.c \
        @top_builddir@/argon2/src/thread.c \
        @top_builddir@/argon2/src/ref.c \
        @top_builddir@/argon2/src/encoding.c

noinst_HEADERS = \
        @top_builddir@/argon2/include/argon2.h \
        @top_builddir@/argon2/src/blake2/blake2.h \
        @top_builddir@/argon2/src/blake2/blake2-impl.h \
        @top_builddir@/argon2/src/blake2/blamka-round-ref.h \
        @top_builddir@/argon2/src/core.h \
        @top_builddir@/argon2/src/encoding.h \
        @top_builddir@/argon2/src/thread.h
endif
--- /dev/null
+/*
+ * Copyright (C) 2004-2017 Savoir-faire Linux Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "base64.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/* Mainly based on the following stackoverflow question:
+ * http://stackoverflow.com/questions/342409/how-do-i-base64-encode-decode-in-c
+ */
/* Base64 alphabet per RFC 4648. */
static const char encoding_table[] = {
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
    'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
    'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
    'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
    's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2',
    '3', '4', '5', '6', '7', '8', '9', '+', '/'
};

/* Number of '=' padding chars for input_length % 3 == 0, 1, 2. */
static const size_t mod_table[] = { 0, 2, 1 };

/*
 * Encode input[0..input_length) as base64 into `output`.
 * On entry *output_length holds the capacity of `output`; on return it
 * holds the encoded length, 4 * ceil(input_length / 3).
 * Returns `output`, or nullptr when output is null or too small (in which
 * case *output_length still reports the required size).
 */
char *base64_encode(const uint8_t *input, size_t input_length,
                    char *output, size_t *output_length)
{
    const size_t capacity = *output_length;
    *output_length = 4 * ((input_length + 2) / 3);
    if (output == nullptr || capacity < *output_length)
        return nullptr;

    size_t in_pos = 0, out_pos = 0;
    while (in_pos < input_length) {
        /* Pack up to three octets into a 24-bit group (zero-padded). */
        uint32_t group = 0;
        for (int k = 0; k < 3; ++k) {
            group <<= 8;
            if (in_pos < input_length)
                group |= input[in_pos++];
        }
        output[out_pos++] = encoding_table[(group >> 18) & 0x3F];
        output[out_pos++] = encoding_table[(group >> 12) & 0x3F];
        output[out_pos++] = encoding_table[(group >> 6) & 0x3F];
        output[out_pos++] = encoding_table[group & 0x3F];
    }

    /* Overwrite the tail with '=' padding (0, 1 or 2 characters). */
    for (size_t p = 0; p < mod_table[input_length % 3]; ++p)
        output[*output_length - 1 - p] = '=';

    return output;
}
+
+uint8_t *base64_decode(const char *input, size_t input_length,
+ uint8_t *output, size_t *output_length)
+{
+ size_t i, j;
+ uint8_t decoding_table[256];
+
+ uint8_t c;
+ for (c = 0; c < 64; c++)
+ decoding_table[static_cast<int>(encoding_table[c])] = c;
+
+ if (input_length % 4 != 0 || input_length < 2)
+ return nullptr;
+
+ size_t out_sz = *output_length;
+ *output_length = input_length / 4 * 3;
+ if (input[input_length - 1] == '=')
+ (*output_length)--;
+ if (input[input_length - 2] == '=')
+ (*output_length)--;
+
+ if (out_sz < *output_length || output == nullptr)
+ return nullptr;
+
+ for (i = 0, j = 0; i < input_length;) {
+ uint8_t sextet_a = input[i] == '=' ? 0 & i++
+ : decoding_table[static_cast<int>(input[i++])];
+ uint8_t sextet_b = input[i] == '=' ? 0 & i++
+ : decoding_table[static_cast<int>(input[i++])];
+ uint8_t sextet_c = input[i] == '=' ? 0 & i++
+ : decoding_table[static_cast<int>(input[i++])];
+ uint8_t sextet_d = input[i] == '=' ? 0 & i++
+ : decoding_table[static_cast<int>(input[i++])];
+
+ uint32_t triple = (sextet_a << 3 * 6) +
+ (sextet_b << 2 * 6) +
+ (sextet_c << 1 * 6) +
+ (sextet_d << 0 * 6);
+
+ if (j < *output_length)
+ output[j++] = (triple >> 2 * 8) & 0xFF;
+ if (j < *output_length)
+ output[j++] = (triple >> 1 * 8) & 0xFF;
+ if (j < *output_length)
+ output[j++] = (triple >> 0 * 8) & 0xFF;
+ }
+
+ return output;
+}
+
+std::string
+base64_encode(const std::vector<uint8_t>::const_iterator begin,
+ const std::vector<uint8_t>::const_iterator end)
+{
+ size_t output_length = 4 * ((std::distance(begin, end) + 2) / 3);
+ std::string out;
+ out.resize(output_length);
+ base64_encode(&(*begin), std::distance(begin, end),
+ &(*out.begin()), &output_length);
+ out.resize(output_length);
+ return out;
+}
+
+
+std::string
+base64_encode(const std::vector<unsigned char>& str)
+{
+ return base64_encode(str.cbegin(), str.cend());
+}
+
+std::string
+base64_decode(const std::string& str)
+{
+ size_t output_length = str.length() / 4 * 3 + 2;
+ std::vector<uint8_t> output;
+ output.resize(output_length);
+ base64_decode(str.data(), str.size(), output.data(), &output_length);
+ output.resize(output_length);
+ return std::string(output.begin(), output.end());
+}
--- /dev/null
+/*
+ * Copyright (C) 2004-2017 Savoir-faire Linux Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <string>
+#include <vector>
+
/**
 * Encode a binary buffer as a base64 string.
 *
 * @param str the input byte buffer
 * @return the base64 representation of the input
 */
std::string base64_encode(const std::vector<unsigned char>& str);
/**
 * Decode a base64 string into the original bytes.
 *
 * @param str the base64-encoded input
 * @return the decoded bytes, carried in a std::string
 */
std::string base64_decode(const std::string& str);
--- /dev/null
+#include "callbacks.h"
+
+namespace dht {
+
+
+GetCallbackSimple
+bindGetCb(GetCallbackRaw raw_cb, void* user_data)
+{
+ if (not raw_cb) return {};
+ return [=](const std::shared_ptr<Value>& value) {
+ return raw_cb(value, user_data);
+ };
+}
+
+GetCallback
+bindGetCb(GetCallbackSimple cb)
+{
+ if (not cb) return {};
+ return [=](const std::vector<std::shared_ptr<Value>>& values) {
+ for (const auto& v : values)
+ if (not cb(v))
+ return false;
+ return true;
+ };
+}
+
+ShutdownCallback
+bindShutdownCb(ShutdownCallbackRaw shutdown_cb_raw, void* user_data)
+{
+ return [=]() { shutdown_cb_raw(user_data); };
+}
+
+DoneCallback
+bindDoneCb(DoneCallbackSimple donecb)
+{
+ if (not donecb) return {};
+ using namespace std::placeholders;
+ return std::bind(donecb, _1);
+}
+
+DoneCallback
+bindDoneCb(DoneCallbackRaw raw_cb, void* user_data)
+{
+ if (not raw_cb) return {};
+ return [=](bool success, const std::vector<std::shared_ptr<Node>>& nodes) {
+ raw_cb(success, (std::vector<std::shared_ptr<Node>>*)&nodes, user_data);
+ };
+}
+
+DoneCallbackSimple
+bindDoneCbSimple(DoneCallbackSimpleRaw raw_cb, void* user_data) {
+ if (not raw_cb) return {};
+ return [=](bool success) {
+ raw_cb(success, user_data);
+ };
+}
+
+std::string
+NodeStats::toString() const
+{
+ std::stringstream ss;
+ ss << "Known nodes: " << good_nodes << " good, " << dubious_nodes << " dubious, " << incoming_nodes << " incoming." << std::endl;
+ if (table_depth > 1) {
+ ss << "Routing table depth: " << table_depth << std::endl;
+ ss << "Network size estimation: " << getNetworkSizeEstimation() << " nodes" << std::endl;
+ }
+ return ss.str();
+}
+
+#ifdef OPENDHT_JSONCPP
/**
 * Serialize this NodeStats to a JSON object: the node counters, plus
 * table depth and network size estimation when the table has depth.
 */
Json::Value
NodeStats::toJson() const
{
    Json::Value val;
    val["good"] = static_cast<Json::LargestUInt>(good_nodes);
    val["dubious"] = static_cast<Json::LargestUInt>(dubious_nodes);
    val["incoming"] = static_cast<Json::LargestUInt>(incoming_nodes);
    if (table_depth > 1) {
        val["table_depth"] = static_cast<Json::LargestUInt>(table_depth);
        val["network_size_estimation"] = static_cast<Json::LargestUInt>(getNetworkSizeEstimation());
    }
    return val;
}

/**
 * Deserialize a NodeStats from JSON. Missing members keep their
 * default value.
 */
NodeStats::NodeStats(const Json::Value& val)
{
    if (val.isMember("good"))
        good_nodes = static_cast<unsigned>(val["good"].asLargestUInt());
    if (val.isMember("dubious"))
        dubious_nodes = static_cast<unsigned>(val["dubious"].asLargestUInt());
    if (val.isMember("incoming"))
        incoming_nodes = static_cast<unsigned>(val["incoming"].asLargestUInt());
    if (val.isMember("table_depth"))
        table_depth = static_cast<unsigned>(val["table_depth"].asLargestUInt());
}
+
/**
 * Serialize this NodeInfo to a JSON object: node ids plus the per-family
 * (IPv4/IPv6) statistics.
 */
Json::Value
NodeInfo::toJson() const
{
    Json::Value val;
    if (id)
        val["id"] = id.toString();
    val["node_id"] = node_id.toString();
    val["ipv4"] = ipv4.toJson();
    val["ipv6"] = ipv6.toJson();
    return val;
}

/**
 * Deserialize a NodeInfo from JSON; "id" is optional.
 */
NodeInfo::NodeInfo(const Json::Value& v)
{
    if (v.isMember("id"))
        id = InfoHash(v["id"].asString());
    node_id = InfoHash(v["node_id"].asString());
    ipv4 = NodeStats(v["ipv4"]);
    ipv6 = NodeStats(v["ipv6"]);
}
+
+#endif
+
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "crypto.h"
+#include "rng.h"
+
+extern "C" {
+#include <gnutls/gnutls.h>
+#include <gnutls/abstract.h>
+#include <gnutls/x509.h>
+#include <nettle/gcm.h>
+#include <nettle/aes.h>
+
+#include <argon2.h>
+}
+
+#include <random>
+#include <sstream>
+#include <stdexcept>
+#include <cassert>
+
#ifdef _WIN32
// The standard does not require uniform_int_distribution to support
// uint8_t (MSVC rejects it), so draw ints clamped to the byte range.
static std::uniform_int_distribution<int> rand_byte{ 0, std::numeric_limits<uint8_t>::max() };
#else
// Default-constructed: full uint8_t range [0, 255].
static std::uniform_int_distribution<uint8_t> rand_byte;
#endif
+
+static gnutls_digest_algorithm_t get_dig_for_pub(gnutls_pubkey_t pubkey)
+{
+ gnutls_digest_algorithm_t dig;
+ int result = gnutls_pubkey_get_preferred_hash_algorithm(pubkey, &dig, nullptr);
+ if (result < 0)
+ return GNUTLS_DIG_UNKNOWN;
+ return dig;
+}
+
+static gnutls_digest_algorithm_t get_dig(gnutls_x509_crt_t crt)
+{
+ gnutls_pubkey_t pubkey;
+ gnutls_pubkey_init(&pubkey);
+
+ int result = gnutls_pubkey_import_x509(pubkey, crt, 0);
+ if (result < 0) {
+ gnutls_pubkey_deinit(pubkey);
+ return GNUTLS_DIG_UNKNOWN;
+ }
+
+ gnutls_digest_algorithm_t dig = get_dig_for_pub(pubkey);
+ gnutls_pubkey_deinit(pubkey);
+ return dig;
+}
+
+// support for GnuTLS < 3.4.
+#if GNUTLS_VERSION_NUMBER < 0x030400
+#define GNUTLS_PKCS_PKCS12_3DES GNUTLS_PKCS_USE_PKCS12_3DES
+#define GNUTLS_PKCS_PKCS12_ARCFOUR GNUTLS_PKCS_USE_PKCS12_ARCFOUR
+#define GNUTLS_PKCS_PKCS12_RC2_40 GNUTLS_PKCS_USE_PKCS12_RC2_40
+#define GNUTLS_PKCS_PBES2_3DES GNUTLS_PKCS_USE_PBES2_3DES
+#define GNUTLS_PKCS_PBES2_AES_128 GNUTLS_PKCS_USE_PBES2_AES_128
+#define GNUTLS_PKCS_PBES2_AES_192 GNUTLS_PKCS_USE_PBES2_AES_192
+#define GNUTLS_PKCS_PBES2_AES_256 GNUTLS_PKCS_USE_PBES2_AES_256
+#endif
+
+namespace dht {
+namespace crypto {
+
+static constexpr std::array<size_t, 3> AES_LENGTHS {{128/8, 192/8, 256/8}};
+static constexpr size_t PASSWORD_SALT_LENGTH {16};
+
+constexpr gnutls_digest_algorithm_t gnutlsHashAlgo(size_t min_res) {
+ return (min_res > 256/8) ? GNUTLS_DIG_SHA512 : (
+ (min_res > 160/8) ? GNUTLS_DIG_SHA256 : (
+ GNUTLS_DIG_SHA1));
+}
+
+constexpr size_t gnutlsHashSize(int algo) {
+ return (algo == GNUTLS_DIG_SHA512) ? 512/8 : (
+ (algo == GNUTLS_DIG_SHA256) ? 256/8 : (
+ (algo == GNUTLS_DIG_SHA1) ? 160/8 : 0 ));
+}
+
+size_t aesKeySize(size_t max)
+{
+ size_t aes_key_len = 0;
+ for (size_t s : AES_LENGTHS) {
+ if (s <= max) aes_key_len = s;
+ else break;
+ }
+ return aes_key_len;
+}
+
+bool aesKeySizeGood(size_t key_size)
+{
+ for (auto& i : AES_LENGTHS)
+ if (key_size == i)
+ return true;
+ return false;
+}
+
+#ifndef GCM_DIGEST_SIZE
+#define GCM_DIGEST_SIZE GCM_BLOCK_SIZE
+#endif
+
+Blob aesEncrypt(const Blob& data, const Blob& key)
+{
+ if (not aesKeySizeGood(key.size()))
+ throw DecryptError("Wrong key size");
+
+ Blob ret(data.size() + GCM_IV_SIZE + GCM_DIGEST_SIZE);
+ {
+ crypto::random_device rdev;
+ std::generate_n(ret.begin(), GCM_IV_SIZE, std::bind(rand_byte, std::ref(rdev)));
+ }
+ struct gcm_aes_ctx aes;
+ gcm_aes_set_key(&aes, key.size(), key.data());
+ gcm_aes_set_iv(&aes, GCM_IV_SIZE, ret.data());
+ gcm_aes_update(&aes, data.size(), data.data());
+
+ gcm_aes_encrypt(&aes, data.size(), ret.data() + GCM_IV_SIZE, data.data());
+ gcm_aes_digest(&aes, GCM_DIGEST_SIZE, ret.data() + GCM_IV_SIZE + data.size());
+ return ret;
+}
+
+Blob aesEncrypt(const Blob& data, const std::string& password)
+{
+ Blob salt;
+ Blob key = stretchKey(password, salt, 256 / 8);
+ Blob encrypted = aesEncrypt(data, key);
+ encrypted.insert(encrypted.begin(), salt.begin(), salt.end());
+ return encrypted;
+}
+
/*
 * AES-GCM decrypt: expects [IV | ciphertext | GCM tag] as produced by
 * aesEncrypt(). Throws DecryptError on bad key size, truncated input,
 * or authentication failure.
 */
Blob aesDecrypt(const Blob& data, const Blob& key)
{
    if (not aesKeySizeGood(key.size()))
        throw DecryptError("Wrong key size");

    // Must hold at least the IV, the tag, and one byte of payload.
    if (data.size() <= GCM_IV_SIZE + GCM_DIGEST_SIZE)
        throw DecryptError("Wrong data size");

    std::array<uint8_t, GCM_DIGEST_SIZE> digest;

    struct gcm_aes_ctx aes;
    gcm_aes_set_key(&aes, key.size(), key.data());
    gcm_aes_set_iv(&aes, GCM_IV_SIZE, data.data());

    size_t data_sz = data.size() - GCM_IV_SIZE - GCM_DIGEST_SIZE;
    Blob ret(data_sz);
    //gcm_aes_update(&aes, data_sz, data.data() + GCM_IV_SIZE);
    gcm_aes_decrypt(&aes, data_sz, ret.data(), data.data() + GCM_IV_SIZE);
    //gcm_aes_digest(aes, GCM_DIGEST_SIZE, digest.data());

    // TODO compute the proper digest directly from the decryption pass
    // Verify the tag by re-running the same update+encrypt sequence used
    // in aesEncrypt over the recovered plaintext and comparing digests.
    Blob ret_tmp(data_sz);
    struct gcm_aes_ctx aes_d;
    gcm_aes_set_key(&aes_d, key.size(), key.data());
    gcm_aes_set_iv(&aes_d, GCM_IV_SIZE, data.data());
    gcm_aes_update(&aes_d, ret.size(), ret.data());
    gcm_aes_encrypt(&aes_d, ret.size(), ret_tmp.data(), ret.data());
    gcm_aes_digest(&aes_d, GCM_DIGEST_SIZE, digest.data());

    // NOTE(review): std::equal is not constant-time; consider a
    // constant-time comparison for the authentication tag.
    if (not std::equal(digest.begin(), digest.end(), data.end() - GCM_DIGEST_SIZE))
        throw DecryptError("Can't decrypt data");

    return ret;
}
+
+Blob aesDecrypt(const Blob& data, const std::string& password)
+{
+ if (data.size() <= PASSWORD_SALT_LENGTH)
+ throw DecryptError("Wrong data size");
+ Blob salt {data.begin(), data.begin()+PASSWORD_SALT_LENGTH};
+ Blob key = stretchKey(password, salt, 256/8);
+ Blob encrypted {data.begin()+PASSWORD_SALT_LENGTH, data.end()};
+ return aesDecrypt(encrypted, key);
+}
+
/**
 * Derive key material from a password using argon2i, then hash it down
 * to key_length bytes. If salt is empty, a random
 * PASSWORD_SALT_LENGTH-byte salt is generated and returned through it.
 */
Blob stretchKey(const std::string& password, Blob& salt, size_t key_length)
{
    if (salt.empty()) {
        salt.resize(PASSWORD_SALT_LENGTH);
        crypto::random_device rdev;
        std::generate_n(salt.begin(), salt.size(), std::bind(rand_byte, std::ref(rdev)));
    }
    Blob res;
    res.resize(32);
    // argon2i parameters: 16 iterations, 64 MiB memory cost, 1 lane.
    auto ret = argon2i_hash_raw(16, 64*1024, 1, password.data(), password.size(), salt.data(), salt.size(), res.data(), res.size());
    if (ret != ARGON2_OK)
        throw CryptoException("Can't compute argon2i !");
    return hash(res, key_length);
}
+
+Blob hash(const Blob& data, size_t hash_len)
+{
+ auto algo = gnutlsHashAlgo(hash_len);
+ size_t res_size = gnutlsHashSize(algo);
+ Blob res;
+ res.resize(res_size);
+ const gnutls_datum_t gdat {(uint8_t*)data.data(), (unsigned)data.size()};
+ if (auto err = gnutls_fingerprint(algo, &gdat, res.data(), &res_size))
+ throw CryptoException(std::string("Can't compute hash: ") + gnutls_strerror(err));
+ res.resize(std::min(hash_len, res_size));
+ return res;
+}
+
/**
 * Hash data_length bytes of data into the hash buffer (hash_length bytes).
 * NOTE(review): hash_length is passed to gnutls as the output capacity;
 * if it is smaller than the selected digest's output (e.g. 21..31 bytes
 * selects SHA-256 which writes 32), gnutls_fingerprint fails and this
 * throws — confirm callers always pass a supported digest size.
 */
void hash(const uint8_t* data, size_t data_length, uint8_t* hash, size_t hash_length)
{
    auto algo = gnutlsHashAlgo(hash_length);
    size_t res_size = hash_length;
    const gnutls_datum_t gdat {(uint8_t*)data, (unsigned)data_length};
    if (auto err = gnutls_fingerprint(algo, &gdat, hash, &res_size))
        throw CryptoException(std::string("Can't compute hash: ") + gnutls_strerror(err));
}
+
+PrivateKey::PrivateKey()
+{}
+
+PrivateKey::PrivateKey(gnutls_x509_privkey_t k) : x509_key(k)
+{
+ gnutls_privkey_init(&key);
+ if (gnutls_privkey_import_x509(key, k, GNUTLS_PRIVKEY_IMPORT_COPY) != GNUTLS_E_SUCCESS) {
+ key = nullptr;
+ throw CryptoException("Can't load generic private key !");
+ }
+}
+
+PrivateKey::PrivateKey(const Blob& import, const std::string& password)
+{
+ int err = gnutls_x509_privkey_init(&x509_key);
+ if (err != GNUTLS_E_SUCCESS)
+ throw CryptoException("Can't initialize private key !");
+
+ const gnutls_datum_t dt {(uint8_t*)import.data(), static_cast<unsigned>(import.size())};
+ const char* password_ptr = password.empty() ? nullptr : password.c_str();
+ int flags = password.empty() ? GNUTLS_PKCS_PLAIN
+ : ( GNUTLS_PKCS_PBES2_AES_128 | GNUTLS_PKCS_PBES2_AES_192 | GNUTLS_PKCS_PBES2_AES_256
+ | GNUTLS_PKCS_PKCS12_3DES | GNUTLS_PKCS_PKCS12_ARCFOUR | GNUTLS_PKCS_PKCS12_RC2_40);
+
+ err = gnutls_x509_privkey_import2(x509_key, &dt, GNUTLS_X509_FMT_PEM, password_ptr, flags);
+ if (err != GNUTLS_E_SUCCESS) {
+ int err_der = gnutls_x509_privkey_import2(x509_key, &dt, GNUTLS_X509_FMT_DER, password_ptr, flags);
+ if (err_der != GNUTLS_E_SUCCESS) {
+ gnutls_x509_privkey_deinit(x509_key);
+ if (err == GNUTLS_E_DECRYPTION_FAILED or err_der == GNUTLS_E_DECRYPTION_FAILED)
+ throw DecryptError("Can't decrypt private key");
+ else
+ throw CryptoException(std::string("Can't load private key: PEM: ") + gnutls_strerror(err)
+ + " DER: " + gnutls_strerror(err_der));
+ }
+ }
+
+ gnutls_privkey_init(&key);
+ if (gnutls_privkey_import_x509(key, x509_key, GNUTLS_PRIVKEY_IMPORT_COPY) != GNUTLS_E_SUCCESS) {
+ throw CryptoException("Can't load generic private key !");
+ }
+}
+
// Move: steal both gnutls handles and leave the source empty.
PrivateKey::PrivateKey(PrivateKey&& o) noexcept : key(o.key), x509_key(o.x509_key)
{
    o.key = nullptr;
    o.x509_key = nullptr;
}

PrivateKey::~PrivateKey()
{
    // Release both owned gnutls handles, if set.
    if (key) {
        gnutls_privkey_deinit(key);
        key = nullptr;
    }
    if (x509_key) {
        gnutls_x509_privkey_deinit(x509_key);
        x509_key = nullptr;
    }
}
+
+PrivateKey&
+PrivateKey::operator=(PrivateKey&& o) noexcept
+{
+ if (key) {
+ gnutls_privkey_deinit(key);
+ key = nullptr;
+ }
+ if (x509_key) {
+ gnutls_x509_privkey_deinit(x509_key);
+ x509_key = nullptr;
+ }
+ key = o.key; x509_key = o.x509_key;
+ o.key = nullptr; o.x509_key = nullptr;
+ return *this;
+}
+
+Blob
+PrivateKey::sign(const Blob& data) const
+{
+ if (!key)
+ throw CryptoException("Can't sign data: no private key set !");
+ if (std::numeric_limits<unsigned>::max() < data.size())
+ throw CryptoException("Can't sign data: too large !");
+ gnutls_datum_t sig;
+ const gnutls_datum_t dat {(unsigned char*)data.data(), (unsigned)data.size()};
+ if (gnutls_privkey_sign_data(key, GNUTLS_DIG_SHA512, 0, &dat, &sig) != GNUTLS_E_SUCCESS)
+ throw CryptoException("Can't sign data !");
+ Blob ret(sig.data, sig.data+sig.size);
+ gnutls_free(sig.data);
+ return ret;
+}
+
+Blob
+PrivateKey::decryptBloc(const uint8_t* src, size_t src_size) const
+{
+ const gnutls_datum_t dat {(uint8_t*)src, (unsigned)src_size};
+ gnutls_datum_t out;
+ int err = gnutls_privkey_decrypt_data(key, 0, &dat, &out);
+ if (err != GNUTLS_E_SUCCESS)
+ throw DecryptError(std::string("Can't decrypt data: ") + gnutls_strerror(err));
+ Blob ret {out.data, out.data+out.size};
+ gnutls_free(out.data);
+ return ret;
+}
+
/**
 * Decrypt data produced by PublicKey::encrypt(): either a single RSA
 * cipherblock, or an RSA-encrypted AES key followed by AES-GCM data.
 */
Blob
PrivateKey::decrypt(const Blob& cipher) const
{
    if (!key)
        throw CryptoException("Can't decrypt data without private key !");

    unsigned key_len = 0;
    // Returns the algorithm id on success and the key size in key_len.
    int err = gnutls_privkey_get_pk_algorithm(key, &key_len);
    if (err < 0)
        throw CryptoException("Can't read public key length !");
    if (err != GNUTLS_PK_RSA)
        throw CryptoException("Must be an RSA key");

    unsigned cypher_block_sz = key_len / 8;
    if (cipher.size() < cypher_block_sz)
        throw DecryptError("Unexpected cipher length");
    else if (cipher.size() == cypher_block_sz)
        // Plain RSA: the whole input is one cipherblock.
        return decryptBloc(cipher.data(), cypher_block_sz);

    // Hybrid scheme: first block holds the AES key, the rest is AES-GCM.
    return aesDecrypt(Blob {cipher.begin() + cypher_block_sz, cipher.end()}, decryptBloc(cipher.data(), cypher_block_sz));
}
+
+Blob
+PrivateKey::serialize(const std::string& password) const
+{
+ if (!x509_key)
+ return {};
+ size_t buf_sz = 8192;
+ Blob buffer;
+ buffer.resize(buf_sz);
+ int err = password.empty()
+ ? gnutls_x509_privkey_export_pkcs8(x509_key, GNUTLS_X509_FMT_PEM, nullptr, GNUTLS_PKCS_PLAIN, buffer.data(), &buf_sz)
+ : gnutls_x509_privkey_export_pkcs8(x509_key, GNUTLS_X509_FMT_PEM, password.c_str(), GNUTLS_PKCS_PBES2_AES_256, buffer.data(), &buf_sz);
+ if (err != GNUTLS_E_SUCCESS) {
+ std::cerr << "Could not export private key - " << gnutls_strerror(err) << std::endl;
+ return {};
+ }
+ buffer.resize(buf_sz);
+ return buffer;
+}
+
/**
 * Derive the public key. Ownership of the gnutls handle is transferred
 * to pk_ret immediately, so it is released even when import fails.
 */
PublicKey
PrivateKey::getPublicKey() const
{
    gnutls_pubkey_t pk;
    gnutls_pubkey_init(&pk);
    PublicKey pk_ret {pk};
    if (gnutls_pubkey_import_privkey(pk, key, GNUTLS_KEY_KEY_CERT_SIGN | GNUTLS_KEY_CRL_SIGN, 0) != GNUTLS_E_SUCCESS)
        return {};
    return pk_ret;
}
+
// Deserialize a public key from packed data (PEM or DER).
PublicKey::PublicKey(const Blob& dat) : pk(nullptr)
{
    unpack(dat.data(), dat.size());
}

PublicKey::~PublicKey()
{
    // Release the owned gnutls handle, if any.
    if (pk) {
        gnutls_pubkey_deinit(pk);
        pk = nullptr;
    }
}
+
+PublicKey&
+PublicKey::operator=(PublicKey&& o) noexcept
+{
+ if (pk)
+ gnutls_pubkey_deinit(pk);
+ pk = o.pk;
+ o.pk = nullptr;
+ return *this;
+}
+
+void
+PublicKey::pack(Blob& b) const
+{
+ if (not pk)
+ throw CryptoException(std::string("Could not export public key: null key"));
+ std::vector<uint8_t> tmp(2048);
+ size_t sz = tmp.size();
+ int err = gnutls_pubkey_export(pk, GNUTLS_X509_FMT_DER, tmp.data(), &sz);
+ if (err != GNUTLS_E_SUCCESS)
+ throw CryptoException(std::string("Could not export public key: ") + gnutls_strerror(err));
+ tmp.resize(sz);
+ b.insert(b.end(), tmp.begin(), tmp.end());
+}
+
+void
+PublicKey::unpack(const uint8_t* data, size_t data_size)
+{
+ if (pk)
+ gnutls_pubkey_deinit(pk);
+ gnutls_pubkey_init(&pk);
+ const gnutls_datum_t dat {(uint8_t*)data, (unsigned)data_size};
+ int err = gnutls_pubkey_import(pk, &dat, GNUTLS_X509_FMT_PEM);
+ if (err != GNUTLS_E_SUCCESS)
+ err = gnutls_pubkey_import(pk, &dat, GNUTLS_X509_FMT_DER);
+ if (err != GNUTLS_E_SUCCESS)
+ throw CryptoException(std::string("Could not read public key: ") + gnutls_strerror(err));
+}
+
+std::string
+PublicKey::toString() const
+{
+ if (not pk)
+ throw CryptoException(std::string("Could not print public key: null key"));
+ std::string ret;
+ size_t sz = ret.size();
+ int err = gnutls_pubkey_export(pk, GNUTLS_X509_FMT_PEM, (void*)ret.data(), &sz);
+ if (err == GNUTLS_E_SHORT_MEMORY_BUFFER) {
+ ret.resize(sz);
+ int err = gnutls_pubkey_export(pk, GNUTLS_X509_FMT_PEM, (void*)ret.data(), &sz);
+ if (err != GNUTLS_E_SUCCESS)
+ throw CryptoException(std::string("Could not print public key: ") + gnutls_strerror(err));
+ } else if (err != GNUTLS_E_SUCCESS)
+ throw CryptoException(std::string("Could not print public key: ") + gnutls_strerror(err));
+ return ret;
+}
+
+void
+PublicKey::msgpack_unpack(msgpack::object o)
+{
+ if (o.type == msgpack::type::BIN)
+ unpack((const uint8_t*)o.via.bin.ptr, o.via.bin.size);
+ else {
+ Blob dat = unpackBlob(o);
+ unpack(dat.data(), dat.size());
+ }
+}
+
+bool
+PublicKey::checkSignature(const Blob& data, const Blob& signature) const
+{
+ if (!pk)
+ return false;
+ const gnutls_datum_t sig {(uint8_t*)signature.data(), (unsigned)signature.size()};
+ const gnutls_datum_t dat {(uint8_t*)data.data(), (unsigned)data.size()};
+ int rc = gnutls_pubkey_verify_data2(pk, GNUTLS_SIGN_RSA_SHA512, 0, &dat, &sig);
+ return rc >= 0;
+}
+
+void
+PublicKey::encryptBloc(const uint8_t* src, size_t src_size, uint8_t* dst, size_t dst_size) const
+{
+ const gnutls_datum_t key_dat {(uint8_t*)src, (unsigned)src_size};
+ gnutls_datum_t encrypted;
+ auto err = gnutls_pubkey_encrypt_data(pk, 0, &key_dat, &encrypted);
+ if (err != GNUTLS_E_SUCCESS)
+ throw CryptoException(std::string("Can't encrypt data: ") + gnutls_strerror(err));
+ if (encrypted.size != dst_size)
+ throw CryptoException("Unexpected cypherblock size");
+ std::copy_n(encrypted.data, encrypted.size, dst);
+ gnutls_free(encrypted.data);
+}
+
/**
 * Encrypt data for the owner of this key: plain RSA for small inputs,
 * RSA-wrapped AES-GCM otherwise. Decrypted by PrivateKey::decrypt().
 */
Blob
PublicKey::encrypt(const Blob& data) const
{
    if (!pk)
        throw CryptoException("Can't read public key !");

    unsigned key_len = 0;
    // Returns the algorithm id on success and the key size in key_len.
    int err = gnutls_pubkey_get_pk_algorithm(pk, &key_len);
    if (err < 0)
        throw CryptoException("Can't read public key length !");
    if (err != GNUTLS_PK_RSA)
        throw CryptoException("Must be an RSA key");

    // Leave room for the 11-byte RSA padding overhead per block.
    const unsigned max_block_sz = key_len / 8 - 11;
    const unsigned cypher_block_sz = key_len / 8;

    /* Use plain RSA if the data is small enough */
    if (data.size() <= max_block_sz) {
        Blob ret(cypher_block_sz);
        encryptBloc(data.data(), data.size(), ret.data(), cypher_block_sz);
        return ret;
    }

    /* Otherwise use RSA+AES-GCM,
       using the max. AES key size that can fit
       in a single RSA packet () */
    unsigned aes_key_sz = aesKeySize(max_block_sz);
    if (aes_key_sz == 0)
        throw CryptoException("Key is not long enough for AES128");
    Blob key(aes_key_sz);
    {
        // Random ephemeral AES key.
        crypto::random_device rdev;
        std::generate_n(key.begin(), key.size(), std::bind(rand_byte, std::ref(rdev)));
    }
    auto data_encrypted = aesEncrypt(data, key);

    // Output layout: [RSA(AES key) | AES-GCM(data)].
    Blob ret;
    ret.reserve(cypher_block_sz + data_encrypted.size());

    ret.resize(cypher_block_sz);
    encryptBloc(key.data(), key.size(), ret.data(), cypher_block_sz);
    ret.insert(ret.end(), data_encrypted.begin(), data_encrypted.end());
    return ret;
}
+
/**
 * Key id of this public key as an InfoHash. Uses the SHA-256 key id
 * when the hash is 32 bytes and GnuTLS >= 3.4.1, the gnutls default
 * otherwise.
 */
InfoHash
PublicKey::getId() const
{
    if (not pk)
        return {};
    InfoHash id;
    size_t sz = id.size();
#if GNUTLS_VERSION_NUMBER < 0x030401
    const int flags = 0;
#else
    const int flags = (id.size() == 32) ? GNUTLS_KEYID_USE_SHA256 : 0;
#endif
    if (auto err = gnutls_pubkey_get_key_id(pk, flags, id.data(), &sz))
        throw CryptoException(std::string("Can't get public key ID: ") + gnutls_strerror(err));
    if (sz != id.size())
        throw CryptoException("Can't get public key ID: wrong output length.");
    return id;
}

/**
 * 256-bit (SHA-256) key id; requires GnuTLS >= 3.4.1.
 */
PkId
PublicKey::getLongId() const
{
    if (not pk)
        return {};
#if GNUTLS_VERSION_NUMBER < 0x030401
    throw CryptoException("Can't get 256 bits public key ID: GnuTLS 3.4.1 or higher required.");
#else
    PkId h;
    size_t sz = h.size();
    if (auto err = gnutls_pubkey_get_key_id(pk, GNUTLS_KEYID_USE_SHA256, h.data(), &sz))
        throw CryptoException(std::string("Can't get 256 bits public key ID: ") + gnutls_strerror(err));
    if (sz != h.size())
        throw CryptoException("Can't get 256 bits public key ID: wrong output length.");
    return h;
#endif
}
+
// Deserialize a certificate chain from packed data (PEM or DER).
Certificate::Certificate(const Blob& certData) : cert(nullptr)
{
    unpack(certData.data(), certData.size());
}
+
+Certificate&
+Certificate::operator=(Certificate&& o) noexcept
+{
+ if (cert)
+ gnutls_x509_crt_deinit(cert);
+ cert = o.cert;
+ o.cert = nullptr;
+ issuer = std::move(o.issuer);
+ return *this;
+}
+
/**
 * Load a certificate chain from PEM or DER data. The first certificate
 * becomes this one; the following ones are chained as issuers.
 */
void
Certificate::unpack(const uint8_t* dat, size_t dat_size)
{
    if (cert) {
        gnutls_x509_crt_deinit(cert);
        cert = nullptr;
    }
    gnutls_x509_crt_t* cert_list;
    unsigned cert_num;
    const gnutls_datum_t crt_dt {(uint8_t*)dat, (unsigned)dat_size};
    // Try PEM first, then DER; require the list to be sorted leaf-first.
    int err = gnutls_x509_crt_list_import2(&cert_list, &cert_num, &crt_dt, GNUTLS_X509_FMT_PEM, GNUTLS_X509_CRT_LIST_FAIL_IF_UNSORTED);
    if (err != GNUTLS_E_SUCCESS)
        err = gnutls_x509_crt_list_import2(&cert_list, &cert_num, &crt_dt, GNUTLS_X509_FMT_DER, GNUTLS_X509_CRT_LIST_FAIL_IF_UNSORTED);
    if (err != GNUTLS_E_SUCCESS || cert_num == 0) {
        cert = nullptr;
        throw CryptoException(std::string("Could not read certificate - ") + gnutls_strerror(err));
    }

    cert = cert_list[0];
    // Each imported gnutls handle is adopted by a Certificate in the chain.
    Certificate* crt = this;
    size_t i = 1;
    while (crt and i < cert_num) {
        crt->issuer = std::make_shared<Certificate>(cert_list[i++]);
        crt = crt->issuer.get();
    }
    // Free only the array; the handles now belong to the chain.
    gnutls_free(cert_list);
}
+
+void
+Certificate::msgpack_unpack(msgpack::object o)
+{
+ if (o.type == msgpack::type::BIN)
+ unpack((const uint8_t*)o.via.bin.ptr, o.via.bin.size);
+ else {
+ Blob dat = unpackBlob(o);
+ unpack(dat.data(), dat.size());
+ }
+}
+
+void
+Certificate::pack(Blob& b) const
+{
+ const Certificate* crt = this;
+ while (crt) {
+ std::string str;
+ size_t buf_sz = 8192;
+ str.resize(buf_sz);
+ if (int err = gnutls_x509_crt_export(crt->cert, GNUTLS_X509_FMT_PEM, &(*str.begin()), &buf_sz)) {
+ std::cerr << "Could not export certificate - " << gnutls_strerror(err) << std::endl;
+ return;
+ }
+ str.resize(buf_sz);
+ b.insert(b.end(), str.begin(), str.end());
+ crt = crt->issuer.get();
+ }
+}
+
Certificate::~Certificate()
{
    // Release the owned gnutls handle; issuer Certificates clean up
    // themselves through their shared_ptr.
    if (cert) {
        gnutls_x509_crt_deinit(cert);
        cert = nullptr;
    }
}

/**
 * Extract the public key. Ownership of the gnutls handle is transferred
 * to pk_ret immediately, so it is released even when import fails.
 */
PublicKey
Certificate::getPublicKey() const
{
    gnutls_pubkey_t pk;
    gnutls_pubkey_init(&pk);
    PublicKey pk_ret(pk);
    if (gnutls_pubkey_import_x509(pk, cert, 0) != GNUTLS_E_SUCCESS)
        return {};
    return pk_ret;
}
+
/**
 * Key id of the certificate's public key (default gnutls flags).
 */
InfoHash
Certificate::getId() const
{
    if (not cert)
        return {};
    InfoHash id;
    size_t sz = id.size();
    if (auto err = gnutls_x509_crt_get_key_id(cert, 0, id.data(), &sz))
        throw CryptoException(std::string("Can't get certificate public key ID: ") + gnutls_strerror(err));
    if (sz != id.size())
        throw CryptoException("Can't get certificate public key ID: wrong output length.");
    return id;
}

/**
 * 256-bit (SHA-256) key id; requires GnuTLS >= 3.4.1.
 */
PkId
Certificate::getLongId() const
{
    if (not cert)
        return {};
#if GNUTLS_VERSION_NUMBER < 0x030401
    throw CryptoException("Can't get certificate 256 bits public key ID: GnuTLS 3.4.1 or higher required.");
#else
    PkId id;
    size_t sz = id.size();
    if (auto err = gnutls_x509_crt_get_key_id(cert, GNUTLS_KEYID_USE_SHA256, id.data(), &sz))
        throw CryptoException(std::string("Can't get certificate 256 bits public key ID: ") + gnutls_strerror(err));
    if (sz != id.size())
        throw CryptoException("Can't get certificate 256 bits public key ID: wrong output length.");
    return id;
#endif
}
+
+static std::string
+getDN(gnutls_x509_crt_t cert, const char* oid, bool issuer = false)
+{
+ std::string dn;
+ dn.resize(512);
+ size_t dn_sz = dn.size();
+ int ret = issuer
+ ? gnutls_x509_crt_get_issuer_dn_by_oid(cert, oid, 0, 0, &(*dn.begin()), &dn_sz)
+ : gnutls_x509_crt_get_dn_by_oid( cert, oid, 0, 0, &(*dn.begin()), &dn_sz);
+ if (ret != GNUTLS_E_SUCCESS)
+ return {};
+ dn.resize(dn_sz);
+ return dn;
+}
+
/** Subject common name (CN). */
std::string
Certificate::getName() const
{
    return getDN(cert, GNUTLS_OID_X520_COMMON_NAME);
}

/** Subject user id (UID). */
std::string
Certificate::getUID() const
{
    return getDN(cert, GNUTLS_OID_LDAP_UID);
}

/** Issuer common name (CN). */
std::string
Certificate::getIssuerName() const
{
    return getDN(cert, GNUTLS_OID_X520_COMMON_NAME, true);
}

/** Issuer user id (UID). */
std::string
Certificate::getIssuerUID() const
{
    return getDN(cert, GNUTLS_OID_LDAP_UID, true);
}
+
+static Certificate::NameType
+typeFromGnuTLS(gnutls_x509_subject_alt_name_t type)
+{
+ switch(type) {
+ case GNUTLS_SAN_DNSNAME:
+ return Certificate::NameType::DNS;
+ case GNUTLS_SAN_RFC822NAME:
+ return Certificate::NameType::RFC822;
+ case GNUTLS_SAN_URI:
+ return Certificate::NameType::URI;
+ case GNUTLS_SAN_IPADDRESS:
+ return Certificate::NameType::IP;
+ default:
+ return Certificate::NameType::UNKNOWN;
+ }
+}
+
/**
 * List the subject alternative names of the certificate.
 * NOTE(review): only GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE stops the
 * loop; other errors (e.g. a name longer than 512 bytes) would append
 * a bogus entry — confirm whether that case needs handling.
 */
std::vector<std::pair<Certificate::NameType, std::string>>
Certificate::getAltNames() const
{
    std::vector<std::pair<NameType, std::string>> names;
    unsigned i = 0;
    std::string name;
    while (true) {
        name.resize(512);
        size_t name_sz = name.size();
        unsigned type;
        int ret = gnutls_x509_crt_get_subject_alt_name2(cert, i++, &(*name.begin()), &name_sz, &type, nullptr);
        if (ret == GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE)
            break;
        name.resize(name_sz);
        names.emplace_back(typeFromGnuTLS((gnutls_x509_subject_alt_name_t)type), name);
    }
    return names;
}
+
/**
 * Whether this certificate is a usable CA: the basicConstraints CA flag
 * must be set, and if the keyUsage extension is marked critical it must
 * allow certificate signing.
 */
bool
Certificate::isCA() const
{
    unsigned critical;
    bool ca_flag = gnutls_x509_crt_get_ca_status(cert, &critical) > 0;
    if (ca_flag) {
        unsigned usage;
        auto ret = gnutls_x509_crt_get_key_usage(cert, &usage, &critical);
        /* Conforming CAs MUST include this extension in certificates that
           contain public keys that are used to validate digital signatures on
           other public key certificates or CRLs. */
        if (ret < 0)
            return false;
        // A non-critical keyUsage does not restrict CA usage.
        if (not critical)
            return true;
        return usage & GNUTLS_KEY_KEY_CERT_SIGN;
    }
    return false;
}
+
+std::string
+Certificate::toString(bool chain) const
+{
+ std::ostringstream ss;
+ const Certificate* crt = this;
+ while (crt) {
+ std::string str;
+ size_t buf_sz = 8192;
+ str.resize(buf_sz);
+ if (int err = gnutls_x509_crt_export(crt->cert, GNUTLS_X509_FMT_PEM, &(*str.begin()), &buf_sz)) {
+ std::cerr << "Could not export certificate - " << gnutls_strerror(err) << std::endl;
+ return {};
+ }
+ str.resize(buf_sz);
+ ss << str;
+ if (not chain)
+ break;
+ crt = crt->issuer.get();
+ }
+ return ss.str();
+}
+
+std::string
+Certificate::print() const
+{
+ gnutls_datum_t out;
+ gnutls_x509_crt_print(cert, GNUTLS_CRT_PRINT_FULL, &out);
+ std::string ret(out.data, out.data+out.size);
+ gnutls_free(out.data);
+ return ret;
+}
+
/**
 * Revoke to_revoke in this certificate's revocation list (created
 * lazily) and re-sign the list with the given key and this certificate.
 */
void
Certificate::revoke(const PrivateKey& key, const Certificate& to_revoke)
{
    if (revocation_lists.empty())
        revocation_lists.emplace(std::make_shared<RevocationList>());
    auto& list = *(*revocation_lists.begin());
    list.revoke(to_revoke);
    list.sign(key, *this);
}
+
+void
+Certificate::addRevocationList(RevocationList&& list)
+{
+ addRevocationList(std::make_shared<RevocationList>(std::forward<RevocationList>(list)));
+}
+
+void
+Certificate::addRevocationList(std::shared_ptr<RevocationList> list)
+{
+ if (revocation_lists.find(list) != revocation_lists.end())
+ return; // Already in the list
+ if (not list->isSignedBy(*this))
+ throw CryptoException("CRL is not signed by this certificate");
+ revocation_lists.emplace(std::move(list));
+}
+
+std::chrono::system_clock::time_point
+Certificate::getActivation() const
+{
+ auto t = gnutls_x509_crt_get_activation_time(cert);
+ if (t == (time_t)-1)
+ return std::chrono::system_clock::time_point::min();
+ return std::chrono::system_clock::from_time_t(t);
+}
+
+std::chrono::system_clock::time_point
+Certificate::getExpiration() const
+{
+ auto t = gnutls_x509_crt_get_expiration_time(cert);
+ if (t == (time_t)-1)
+ return std::chrono::system_clock::time_point::min();
+ return std::chrono::system_clock::from_time_t(t);
+}
+
+PrivateKey
+PrivateKey::generate(unsigned key_length)
+{
+ gnutls_x509_privkey_t key;
+ if (gnutls_x509_privkey_init(&key) != GNUTLS_E_SUCCESS)
+ throw CryptoException("Can't initialize private key.");
+ int err = gnutls_x509_privkey_generate(key, GNUTLS_PK_RSA, key_length, 0);
+ if (err != GNUTLS_E_SUCCESS) {
+ gnutls_x509_privkey_deinit(key);
+ throw CryptoException(std::string("Can't generate RSA key pair: ") + gnutls_strerror(err));
+ }
+ return PrivateKey{key};
+}
+
+// Generate a fresh elliptic-curve key pair; the curve size is derived from
+// the ULTRA security parameter. Throws CryptoException on failure.
+PrivateKey
+PrivateKey::generateEC()
+{
+ gnutls_x509_privkey_t key;
+ if (gnutls_x509_privkey_init(&key) != GNUTLS_E_SUCCESS)
+ throw CryptoException("Can't initialize private key.");
+ int err = gnutls_x509_privkey_generate(key, GNUTLS_PK_EC, gnutls_sec_param_to_pk_bits(GNUTLS_PK_EC, GNUTLS_SEC_PARAM_ULTRA), 0);
+ if (err != GNUTLS_E_SUCCESS) {
+ gnutls_x509_privkey_deinit(key);
+ throw CryptoException(std::string("Can't generate EC key pair: ") + gnutls_strerror(err));
+ }
+ return PrivateKey{key};
+}
+
+Identity
+generateIdentity(const std::string& name, crypto::Identity ca, unsigned key_length, bool is_ca)
+{
+    // Create a fresh RSA key, then a certificate for it
+    // (signed by 'ca' when provided, self-signed otherwise).
+    auto privkey = std::make_shared<PrivateKey>(PrivateKey::generate(key_length));
+    auto certificate = std::make_shared<Certificate>(Certificate::generate(*privkey, name, ca, is_ca));
+    return {std::move(privkey), std::move(certificate)};
+}
+
+
+Identity
+generateIdentity(const std::string& name, Identity ca, unsigned key_length) {
+    // Without a complete CA identity the new certificate becomes its own CA.
+    const bool self_ca = not (ca.first and ca.second);
+    return generateIdentity(name, ca, key_length, self_ca);
+}
+
+Identity
+generateEcIdentity(const std::string& name, crypto::Identity ca, bool is_ca)
+{
+    // Create a fresh EC key, then a certificate for it
+    // (signed by 'ca' when provided, self-signed otherwise).
+    auto privkey = std::make_shared<PrivateKey>(PrivateKey::generateEC());
+    auto certificate = std::make_shared<Certificate>(Certificate::generate(*privkey, name, ca, is_ca));
+    return {std::move(privkey), std::move(certificate)};
+}
+
+Identity
+generateEcIdentity(const std::string& name, Identity ca) {
+    // Without a complete CA identity the new certificate becomes its own CA.
+    const bool self_ca = not (ca.first and ca.second);
+    return generateEcIdentity(name, ca, self_ca);
+}
+
+// Build and sign an X.509 certificate for 'key'.
+// - 'name' becomes the CN; the public key id becomes the UID and subject key id.
+// - When a full CA identity is given the certificate is signed by it
+//   (the CA cert must have the CA flag); otherwise it is self-signed.
+// Returns an empty Certificate on any gnutls failure.
+Certificate
+Certificate::generate(const PrivateKey& key, const std::string& name, Identity ca, bool is_ca)
+{
+ gnutls_x509_crt_t cert;
+ if (not key.x509_key or gnutls_x509_crt_init(&cert) != GNUTLS_E_SUCCESS)
+ return {};
+ // 'ret' owns 'cert' from here on, so early returns release the handle.
+ Certificate ret {cert};
+
+ int64_t now = time(NULL);
+ /* 2038 bug: don't allow time wrap */
+ auto boundTime = [](int64_t t) -> time_t {
+ return std::min<int64_t>(t, std::numeric_limits<time_t>::max());
+ };
+ // Valid from now, for roughly 10 years (clamped to time_t range).
+ gnutls_x509_crt_set_activation_time(cert, boundTime(now));
+ gnutls_x509_crt_set_expiration_time(cert, boundTime(now + (10 * 365 * 24 * 60 * 60)));
+ if (gnutls_x509_crt_set_key(cert, key.x509_key) != GNUTLS_E_SUCCESS) {
+ std::cerr << "Error when setting certificate key" << std::endl;
+ return {};
+ }
+ if (gnutls_x509_crt_set_version(cert, 3) != GNUTLS_E_SUCCESS) {
+ std::cerr << "Error when setting certificate version" << std::endl;
+ return {};
+ }
+
+ // TODO: compute the subject key using the recommended RFC method
+ auto pk_id = key.getPublicKey().getId();
+ const std::string uid_str = pk_id.toString();
+
+ // NOTE(review): pk_id is written as raw bytes (&pk_id, sizeof) — assumes
+ // the id type is a trivially-copyable fixed-size hash; confirm.
+ gnutls_x509_crt_set_subject_key_id(cert, &pk_id, sizeof(pk_id));
+ gnutls_x509_crt_set_dn_by_oid(cert, GNUTLS_OID_X520_COMMON_NAME, 0, name.data(), name.length());
+ gnutls_x509_crt_set_dn_by_oid(cert, GNUTLS_OID_LDAP_UID, 0, uid_str.data(), uid_str.length());
+
+ {
+ // Random 64-bit serial number.
+ random_device rdev;
+ std::uniform_int_distribution<uint64_t> dist{};
+ uint64_t cert_serial = dist(rdev);
+ gnutls_x509_crt_set_serial(cert, &cert_serial, sizeof(cert_serial));
+ }
+
+ // CA certificates may sign certificates and CRLs; end-entity
+ // certificates may sign data and encrypt.
+ unsigned key_usage = 0;
+ if (is_ca) {
+ gnutls_x509_crt_set_ca_status(cert, 1);
+ key_usage |= GNUTLS_KEY_KEY_CERT_SIGN | GNUTLS_KEY_CRL_SIGN;
+ } else {
+ key_usage |= GNUTLS_KEY_DIGITAL_SIGNATURE | GNUTLS_KEY_DATA_ENCIPHERMENT;
+ }
+ gnutls_x509_crt_set_key_usage(cert, key_usage);
+
+ if (ca.first && ca.second) {
+ if (not ca.second->isCA()) {
+ // Signing certificate must be CA.
+ return {};
+ }
+ //if (gnutls_x509_crt_sign2(cert, ca.second->cert, ca.first->x509_key, get_dig(cert), 0) != GNUTLS_E_SUCCESS) {
+ if (gnutls_x509_crt_privkey_sign(cert, ca.second->cert, ca.first->key, get_dig(cert), 0) != GNUTLS_E_SUCCESS) {
+ std::cerr << "Error when signing certificate" << std::endl;
+ return {};
+ }
+ ret.issuer = ca.second;
+ } else {
+ // Self-signed certificate.
+ //if (gnutls_x509_crt_sign2(cert, cert, key, get_dig(cert), 0) != GNUTLS_E_SUCCESS) {
+ if (gnutls_x509_crt_privkey_sign(cert, cert, key.key, get_dig(cert), 0) != GNUTLS_E_SUCCESS) {
+ std::cerr << "Error when signing certificate" << std::endl;
+ return {};
+ }
+ }
+
+ // Round-trip through the packed form so the returned object reflects
+ // the signed, serialized certificate.
+ return ret.getPacked();
+}
+
+std::vector<std::shared_ptr<RevocationList>>
+Certificate::getRevocationLists() const
+{
+    // Snapshot the attached CRLs as a vector (shared ownership, no copies
+    // of the underlying lists).
+    return {revocation_lists.begin(), revocation_lists.end()};
+}
+
+// Create an empty revocation list, allocating the gnutls CRL handle.
+// NOTE(review): the init return value is unchecked — confirm callers
+// tolerate a null handle on allocation failure.
+RevocationList::RevocationList()
+{
+ gnutls_x509_crl_init(&crl);
+}
+
+// Deserialize a revocation list from a binary blob (PEM or DER).
+// On parse failure the gnutls handle is released and the original
+// exception is rethrown.
+RevocationList::RevocationList(const Blob& b)
+{
+    gnutls_x509_crl_init(&crl);
+    try {
+        unpack(b.data(), b.size());
+    } catch (...) {
+        gnutls_x509_crl_deinit(crl);
+        crl = nullptr;
+        // Rethrow the original exception. The previous 'throw e;' sliced
+        // the caught object down to std::exception, discarding the derived
+        // type (CryptoException) and its message.
+        throw;
+    }
+}
+
+RevocationList::~RevocationList()
+{
+    // Release the gnutls handle exactly once; a moved-from or
+    // failed-construction object holds null and is a no-op.
+    if (not crl)
+        return;
+    gnutls_x509_crl_deinit(crl);
+    crl = nullptr;
+}
+
+// Serialize the CRL in DER form, appending the bytes to 'b'.
+// Throws CryptoException when the gnutls export fails.
+void
+RevocationList::pack(Blob& b) const
+{
+ gnutls_datum_t gdat {nullptr, 0};
+ if (auto err = gnutls_x509_crl_export2(crl, GNUTLS_X509_FMT_DER, &gdat)) {
+ throw CryptoException(std::string("Can't export CRL: ") + gnutls_strerror(err));
+ }
+ b.insert(b.end(), gdat.data, gdat.data + gdat.size);
+ gnutls_free(gdat.data); // buffer is gnutls-allocated
+}
+
+// Load a CRL from raw bytes, trying PEM first and falling back to DER.
+// Throws CryptoException when both formats fail or the input is too large
+// for gnutls's 'unsigned' size field.
+void
+RevocationList::unpack(const uint8_t* dat, size_t dat_size)
+{
+ if (std::numeric_limits<unsigned>::max() < dat_size)
+ throw CryptoException("Can't load CRL: too large!");
+ const gnutls_datum_t gdat {(uint8_t*)dat, (unsigned)dat_size};
+ if (auto err_pem = gnutls_x509_crl_import(crl, &gdat, GNUTLS_X509_FMT_PEM))
+ if (auto err_der = gnutls_x509_crl_import(crl, &gdat, GNUTLS_X509_FMT_DER)) {
+ throw CryptoException(std::string("Can't load CRL: PEM: ") + gnutls_strerror(err_pem)
+ + " DER: " + gnutls_strerror(err_der));
+ }
+}
+
+// Deserialize a CRL from a msgpack object: either a raw BIN payload or a
+// packed Blob. Any parse error is reported as a msgpack type_error so the
+// msgpack layer handles it uniformly.
+void
+RevocationList::msgpack_unpack(msgpack::object o)
+{
+ try {
+ if (o.type == msgpack::type::BIN)
+ unpack((const uint8_t*)o.via.bin.ptr, o.via.bin.size);
+ else {
+ Blob dat = unpackBlob(o);
+ unpack(dat.data(), dat.size());
+ }
+ } catch (...) {
+ throw msgpack::type_error();
+ }
+}
+
+// Whether 'crt' appears in this revocation list.
+// Throws CryptoException when gnutls reports an error (negative return).
+bool
+RevocationList::isRevoked(const Certificate& crt) const
+{
+ auto ret = gnutls_x509_crt_check_revocation(crt.cert, &crl, 1);
+ if (ret < 0)
+ throw CryptoException(std::string("Can't check certificate revocation status: ") + gnutls_strerror(ret));
+ return ret != 0;
+}
+
+// Add 'crt' to this revocation list with revocation time 't'
+// (defaulting to now when the caller passes time_point::min()).
+// The list must be re-signed (see sign()) before the change is effective.
+void
+RevocationList::revoke(const Certificate& crt, std::chrono::system_clock::time_point t)
+{
+ if (t == time_point::min())
+ t = clock::now();
+ if (auto err = gnutls_x509_crl_set_crt(crl, crt.cert, std::chrono::system_clock::to_time_t(t)))
+ throw CryptoException(std::string("Can't revoke certificate: ") + gnutls_strerror(err));
+}
+
+// Read a single issuer DN component (e.g. CN or UID) from a CRL.
+// Returns an empty string when the component is absent or on error.
+static std::string
+getCRLIssuerDN(gnutls_x509_crl_t cert, const char* oid)
+{
+    std::string dn;
+    dn.resize(512);
+    size_t dn_sz = dn.size();
+    int ret = gnutls_x509_crl_get_issuer_dn_by_oid(cert, oid, 0, 0, &(*dn.begin()), &dn_sz);
+    if (ret == GNUTLS_E_SHORT_MEMORY_BUFFER) {
+        // dn_sz now holds the required size: retry with a buffer that fits
+        // instead of silently returning an empty string for long DNs.
+        dn.resize(dn_sz);
+        ret = gnutls_x509_crl_get_issuer_dn_by_oid(cert, oid, 0, 0, &(*dn.begin()), &dn_sz);
+    }
+    if (ret != GNUTLS_E_SUCCESS)
+        return {};
+    dn.resize(dn_sz);
+    return dn;
+}
+
+/** Read the CRL issuer Common Name (CN); empty string when absent. */
+std::string
+RevocationList::getIssuerName() const
+{
+ return getCRLIssuerDN(crl, GNUTLS_OID_X520_COMMON_NAME);
+}
+
+/** Read the CRL issuer User ID (UID) attribute; empty string when absent. */
+std::string
+RevocationList::getIssuerUID() const
+{
+ return getCRLIssuerDN(crl, GNUTLS_OID_LDAP_UID);
+}
+
+RevocationList::time_point
+RevocationList::getNextUpdateTime() const
+{
+    // "nextUpdate" field of the CRL; min() when missing or on error.
+    const auto raw = gnutls_x509_crl_get_next_update(crl);
+    return raw == (time_t)-1
+        ? std::chrono::system_clock::time_point::min()
+        : std::chrono::system_clock::from_time_t(raw);
+}
+
+RevocationList::time_point
+RevocationList::getUpdateTime() const
+{
+    // "thisUpdate" field of the CRL; min() when missing or on error.
+    const auto raw = gnutls_x509_crl_get_this_update(crl);
+    return raw == (time_t)-1
+        ? std::chrono::system_clock::time_point::min()
+        : std::chrono::system_clock::from_time_t(raw);
+}
+
+// Byte-order tag consumed by the endian() conversion helper below.
+enum class Endian : uint32_t
+{
+ LITTLE = 0,
+ BIG = 1
+};
+
+template <typename T>
+T endian(T w, Endian endian = Endian::BIG)
+{
+    // Convert 'w' between host byte order and the requested order.
+    // Host endianness is probed at runtime: on a little-endian host the
+    // low 32 bits of the 64-bit constant 1 read back as 1 via the union.
+    union { uint64_t quad; uint32_t islittle; } probe;
+    probe.quad = 1;
+    // Host order already matches the requested order: nothing to do.
+    if (probe.islittle ^ (uint32_t)endian)
+        return w;
+    // Otherwise reverse the bytes; decent compilers collapse this loop
+    // into a single bswap instruction.
+    T reversed = 0;
+    for (size_t n = sizeof(T); n > 0; --n) {
+        reversed = (reversed << 8) | (w & 0xff);
+        w >>= 8;
+    }
+    return reversed;
+}
+
+// Finalize and sign this CRL with the CA's key:
+//  - set version and thisUpdate/nextUpdate times (default validity runs
+//    until the signing certificate itself expires),
+//  - initialize or increment the CRL number extension,
+//  - sign with SHA-512, then re-serialize so the object is usable.
+// Throws CryptoException on any gnutls failure.
+void
+RevocationList::sign(const PrivateKey& key, const Certificate& ca, duration validity)
+{
+    if (auto err = gnutls_x509_crl_set_version(crl, 2))
+        throw CryptoException(std::string("Can't set CRL version: ") + gnutls_strerror(err));
+    auto now = std::chrono::system_clock::now();
+    auto next_update = (validity == duration{}) ? ca.getExpiration() : now + validity;
+    if (auto err = gnutls_x509_crl_set_this_update(crl, std::chrono::system_clock::to_time_t(now)))
+        throw CryptoException(std::string("Can't set CRL update time: ") + gnutls_strerror(err));
+    if (auto err = gnutls_x509_crl_set_next_update(crl, std::chrono::system_clock::to_time_t(next_update)))
+        throw CryptoException(std::string("Can't set CRL next update time: ") + gnutls_strerror(err));
+    uint64_t number {0};
+    size_t number_sz {sizeof(number)};
+    unsigned critical {0};
+    gnutls_x509_crl_get_number(crl, &number, &number_sz, &critical);
+    if (number == 0) {
+        // initialize to a random number
+        number_sz = sizeof(number);
+        random_device rdev;
+        std::generate_n((uint8_t*)&number, sizeof(number), std::bind(rand_byte, std::ref(rdev)));
+    } else
+        // increment in big-endian (on-wire) byte order
+        number = endian(endian(number) + 1);
+    if (auto err = gnutls_x509_crl_set_number(crl, &number, sizeof(number)))
+        // fixed copy-pasted message: this sets the CRL number, not the update time
+        throw CryptoException(std::string("Can't set CRL number: ") + gnutls_strerror(err));
+    if (auto err = gnutls_x509_crl_sign2(crl, ca.cert, key.x509_key, GNUTLS_DIG_SHA512, 0))
+        throw CryptoException(std::string("Can't sign certificate revocation list: ") + gnutls_strerror(err));
+    // to be able to actually use the CRL we need to serialize/deserialize it
+    auto packed = getPacked();
+    unpack(packed.data(), packed.size());
+}
+
+// Whether this CRL carries a valid signature from 'issuer'.
+// Returns false both on verification failure (non-zero result flags)
+// and on gnutls errors.
+bool
+RevocationList::isSignedBy(const Certificate& issuer) const
+{
+ unsigned result {0};
+ auto err = gnutls_x509_crl_verify(crl, &issuer.cert, 1, 0, &result);
+ if (err < 0) {
+ //std::cout << "Can't verify CRL: " << err << " " << result << " " << gnutls_strerror(err) << std::endl;
+ return false;
+ }
+ return result == 0; // 0 means no verification failure flags were set
+}
+
+
+// Read the CRL number extension as raw bytes (buffer sized for up to
+// 20 bytes, the RFC 5280 maximum for serial-style integers).
+// NOTE(review): the gnutls return value is unchecked — on failure the
+// returned blob may be all zeros; confirm callers tolerate this.
+Blob
+RevocationList::getNumber() const
+{
+ Blob number(20);
+ size_t number_sz {number.size()};
+ unsigned critical {0};
+ gnutls_x509_crl_get_number(crl, number.data(), &number_sz, &critical);
+ if (number_sz != number.size())
+ number.resize(number_sz);
+ return number;
+}
+
+// Human-readable dump of the CRL (gnutls "full" print format).
+// Returns an empty string on failure: the previous version ignored the
+// gnutls_x509_crl_print return value, reading an uninitialized datum
+// (undefined behavior) when printing failed.
+std::string
+RevocationList::toString() const
+{
+    gnutls_datum_t out;
+    if (gnutls_x509_crl_print(crl, GNUTLS_CRT_PRINT_FULL, &out) != GNUTLS_E_SUCCESS)
+        return {};
+    std::string ret(out.data, out.data + out.size);
+    gnutls_free(out.data);
+    return ret;
+}
+
+// Allocate an empty gnutls trust list.
+TrustList::TrustList() {
+ gnutls_x509_trust_list_init(&trust, 0);
+}
+
+TrustList::~TrustList() {
+    // Guard against a null handle: move-assignment (operator= below) leaves
+    // the moved-from object's 'trust' null, and destroying it previously
+    // passed nullptr to gnutls_x509_trust_list_deinit.
+    // The '1' also frees the CAs/CRLs owned by the list.
+    if (trust)
+        gnutls_x509_trust_list_deinit(trust, 1);
+}
+
+TrustList&
+TrustList::operator=(TrustList&& o)
+{
+    // Release our current list, then steal the other's handle and null it
+    // out so the moved-from object's destructor becomes a no-op.
+    if (trust)
+        gnutls_x509_trust_list_deinit(trust, true);
+    trust = o.trust;
+    o.trust = nullptr;
+    return *this;
+}
+
+// Add a certificate (and its whole issuer chain) as trusted CAs, along
+// with any revocation lists attached to the chain.
+void TrustList::add(const Certificate& crt)
+{
+ auto chain = crt.getChainWithRevocations(true);
+ gnutls_x509_trust_list_add_cas(trust, chain.first.data(), chain.first.size(), GNUTLS_TL_NO_DUPLICATES);
+ if (not chain.second.empty())
+ gnutls_x509_trust_list_add_crls(
+ trust,
+ chain.second.data(), chain.second.size(),
+ GNUTLS_TL_VERIFY_CRL | GNUTLS_TL_NO_DUPLICATES, 0);
+}
+
+// Add a revocation list to the trust store. A copy of the CRL handle is
+// inserted because the trust list takes ownership of what it stores.
+void TrustList::add(const RevocationList& crl)
+{
+    auto copy = crl.getCopy();
+    // Fixed mojibake: "&copy," had been corrupted into the HTML entity
+    // sequence "©" (U+00A9), which does not compile.
+    gnutls_x509_trust_list_add_crls(trust, &copy, 1, GNUTLS_TL_VERIFY_CRL | GNUTLS_TL_NO_DUPLICATES, 0);
+}
+
+// Remove a certificate from the trusted CAs; when 'parents' is set, also
+// remove every certificate up its issuer chain.
+void TrustList::remove(const Certificate& crt, bool parents)
+{
+ gnutls_x509_trust_list_remove_cas(trust, &crt.cert, 1);
+ if (parents) {
+ for (auto c = crt.issuer; c; c = c->issuer)
+ gnutls_x509_trust_list_remove_cas(trust, &c->cert, 1);
+ }
+}
+
+// Verify a certificate chain against this trust list using gnutls's
+// MEDIUM security profile. Both the gnutls return code and the
+// verification status flags are captured in the returned VerifyResult.
+TrustList::VerifyResult
+TrustList::verify(const Certificate& crt) const
+{
+ auto chain = crt.getChain();
+ VerifyResult ret;
+ ret.ret = gnutls_x509_trust_list_verify_crt2(
+ trust,
+ chain.data(), chain.size(),
+ nullptr, 0,
+ GNUTLS_PROFILE_TO_VFLAGS(GNUTLS_PROFILE_MEDIUM),
+ &ret.result, nullptr);
+ return ret;
+}
+
+std::string
+TrustList::VerifyResult::toString() const
+{
+    // Render through the stream formatter defined below.
+    std::ostringstream out;
+    out << *this;
+    return out.str();
+}
+
+// Pretty-print a verification result: first the gnutls error (if the call
+// itself failed), otherwise one line per failure flag set in 'result',
+// or a success message when no GNUTLS_CERT_INVALID flag is present.
+std::ostream& operator<< (std::ostream& o, const TrustList::VerifyResult& h)
+{
+ if (h.ret < 0) {
+ o << "Error verifying certificate: " << gnutls_strerror(h.ret) << std::endl;
+ } else if (h.result & GNUTLS_CERT_INVALID) {
+ o << "Certificate check failed with code: " << h.result << std::endl;
+ if (h.result & GNUTLS_CERT_SIGNATURE_FAILURE)
+ o << "* The signature verification failed." << std::endl;
+ if (h.result & GNUTLS_CERT_REVOKED)
+ o << "* Certificate is revoked" << std::endl;
+ if (h.result & GNUTLS_CERT_SIGNER_NOT_FOUND)
+ o << "* Certificate's issuer is not known" << std::endl;
+ if (h.result & GNUTLS_CERT_SIGNER_NOT_CA)
+ o << "* Certificate's issuer not a CA" << std::endl;
+ if (h.result & GNUTLS_CERT_SIGNER_CONSTRAINTS_FAILURE)
+ o << "* Certificate's signer constraints were violated" << std::endl;
+ if (h.result & GNUTLS_CERT_INSECURE_ALGORITHM)
+ o << "* Certificate was signed using an insecure algorithm" << std::endl;
+ if (h.result & GNUTLS_CERT_NOT_ACTIVATED)
+ o << "* Certificate is not yet activated" << std::endl;
+ if (h.result & GNUTLS_CERT_EXPIRED)
+ o << "* Certificate has expired" << std::endl;
+ if (h.result & GNUTLS_CERT_UNEXPECTED_OWNER)
+ o << "* The owner is not the expected one" << std::endl;
+#if GNUTLS_VERSION_NUMBER >= 0x030401
+ // Flag only exists from gnutls 3.4.1 on.
+ if (h.result & GNUTLS_CERT_PURPOSE_MISMATCH)
+ o << "* Certificate or an intermediate does not match the intended purpose" << std::endl;
+#endif
+ if (h.result & GNUTLS_CERT_MISMATCH)
+ o << "* Certificate presented isn't the expected one" << std::endl;
+ } else {
+ o << "Certificate is valid" << std::endl;
+ }
+ return o;
+}
+
+}
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "default_types.h"
+
+namespace dht {
+
+std::ostream& operator<< (std::ostream& s, const DhtMessage& v)
+{
+    // One-line debug representation: just the service name.
+    return s << "DhtMessage: service " << v.service << std::endl;
+}
+
+// Store policy for DhtMessage values: reject messages with an empty
+// service name; anything else (including payloads that fail to unpack —
+// the exception is deliberately swallowed) falls through to the default
+// store policy.
+bool
+DhtMessage::storePolicy(InfoHash h, std::shared_ptr<Value>& v, const InfoHash& f, const SockAddr& sa)
+{
+ try {
+ auto msg = unpackMsg<DhtMessage>(v->data);
+ if (msg.service.empty())
+ return false;
+ } catch (const std::exception& e) {}
+ return ValueType::DEFAULT_STORE_POLICY(h, v, f, sa);
+}
+
+// Build a value filter matching DhtMessage values whose service name is
+// exactly 's'. Values that fail to unpack are filtered out.
+Value::Filter
+DhtMessage::ServiceFilter(std::string s)
+{
+ return Value::Filter::chain(
+ Value::TypeFilter(TYPE),
+ [s](const Value& v) {
+ try {
+ return unpackMsg<DhtMessage>(v.data).service == s;
+ } catch (const std::exception& e) {
+ return false;
+ }
+ }
+ );
+}
+
+std::ostream& operator<< (std::ostream& s, const IpServiceAnnouncement& v)
+{
+    // Prints "Peer: port N addr X" when the announcement carries an
+    // address; the numeric host is resolved with getnameinfo.
+    if (not v.addr)
+        return s;
+    s << "Peer: " << "port " << v.getPort();
+    char host[NI_MAXHOST];
+    if (getnameinfo(v.addr.get(), v.addr.getLength(), host, sizeof(host), nullptr, 0, NI_NUMERICHOST) == 0)
+        s << " addr " << host;
+    return s;
+}
+
+// Store policy for service announcements: rebuild the stored value from
+// the sender's observed address (keeping only the announced port), so a
+// peer cannot announce an arbitrary third-party address. Rejects port 0
+// and payloads that fail to unpack.
+bool
+IpServiceAnnouncement::storePolicy(InfoHash h, std::shared_ptr<Value>& v, const InfoHash& f, const SockAddr& sa)
+{
+ try {
+ auto msg = unpackMsg<IpServiceAnnouncement>(v->data);
+ if (msg.getPort() == 0)
+ return false;
+ IpServiceAnnouncement sa_addr {sa};
+ sa_addr.setPort(msg.getPort());
+ // argument v is modified (not the value).
+ v = std::make_shared<Value>(IpServiceAnnouncement::TYPE, sa_addr, v->id);
+ return ValueType::DEFAULT_STORE_POLICY(h, v, f, sa);
+ } catch (const std::exception& e) {}
+ return false;
+}
+
+// Built-in value type registry: (id, display name, expiration, optional
+// custom store policy). Ids must stay stable across versions.
+const ValueType DhtMessage::TYPE(1, "DHT message", std::chrono::minutes(5), DhtMessage::storePolicy);
+const ValueType IpServiceAnnouncement::TYPE(2, "Internet Service Announcement", std::chrono::minutes(15), IpServiceAnnouncement::storePolicy);
+const ValueType ImMessage::TYPE = {3, "IM message", std::chrono::minutes(5)};
+const ValueType TrustRequest::TYPE = {4, "Certificate trust request", std::chrono::hours(24*7)};
+const ValueType IceCandidates::TYPE = {5, "ICE candidates", std::chrono::minutes(1)};
+
+// Value types registered by default on a secure node.
+const std::array<std::reference_wrapper<const ValueType>, 5>
+DEFAULT_TYPES
+{{
+ ValueType::USER_DATA,
+ DhtMessage::TYPE,
+ ImMessage::TYPE,
+ IceCandidates::TYPE,
+ TrustRequest::TYPE
+}};
+
+// Types only accepted on non-secure nodes (the sender address is trusted
+// as-is; see IpServiceAnnouncement::storePolicy).
+const std::array<std::reference_wrapper<const ValueType>, 1>
+DEFAULT_INSECURE_TYPES
+{{
+ IpServiceAnnouncement::TYPE
+}};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2018 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+
+#include "dht.h"
+#include "rng.h"
+#include "search.h"
+#include "storage.h"
+#include "request.h"
+
+#include <msgpack.hpp>
+
+#include <algorithm>
+#include <random>
+#include <sstream>
+
+namespace dht {
+
+using namespace std::placeholders;
+
+// Out-of-line definitions for the class-scope constexpr constants
+// (required for ODR-use before C++17 inline variables).
+constexpr std::chrono::minutes Dht::MAX_STORAGE_MAINTENANCE_EXPIRE_TIME;
+constexpr std::chrono::minutes Dht::SEARCH_EXPIRE_TIME;
+constexpr std::chrono::seconds Dht::LISTEN_EXPIRE_TIME;
+constexpr std::chrono::seconds Dht::REANNOUNCE_MARGIN;
+
+// Connectivity status for one address family:
+// Connected when at least one good node is known, Connecting while pings
+// are pending or nodes are known but unconfirmed, Disconnected otherwise.
+NodeStatus
+Dht::getStatus(sa_family_t af) const
+{
+ const auto& stats = getNodesStats(af);
+ if (stats.good_nodes)
+ return NodeStatus::Connected;
+ auto& ping = af == AF_INET ? pending_pings4 : pending_pings6;
+ if (ping or stats.getKnownNodes())
+ return NodeStatus::Connecting;
+ return NodeStatus::Disconnected;
+}
+
+// Graceful shutdown: run one final storage-maintenance pass so other nodes
+// keep our stored values, then invoke 'cb' once every pending operation
+// has completed (immediately when maintenance is disabled or no ops start).
+void
+Dht::shutdown(ShutdownCallback cb)
+{
+    if (not maintain_storage) {
+        if (cb) cb();
+        return;
+    }
+
+    // Last store maintenance
+    scheduler.syncTime();
+    // Shared countdown of in-flight maintenance operations.
+    auto remaining = std::make_shared<int>(0);
+    auto str_donecb = [=](bool, const std::vector<Sp<Node>>&) {
+        --*remaining;
+        // fixed log message typo ("shuting") and %u/int format mismatch
+        DHT_LOG.w("shutting down node: %d ops remaining", *remaining);
+        if (!*remaining && cb) { cb(); }
+    };
+
+    for (auto& str : store)
+        *remaining += maintainStorage(str, true, str_donecb);
+
+    if (!*remaining) {
+        DHT_LOG.w("shutting down node: %d ops remaining", *remaining);
+        if (cb)
+            cb();
+    }
+}
+
+/** Whether the network engine has a running socket for this address family. */
+bool
+Dht::isRunning(sa_family_t af) const { return network_engine.isRunning(af); }
+
+/* Every bucket contains an unordered list of nodes. */
+// Look up a node by id in the routing table for the given family.
+// Returns an empty pointer when the node is not in the matching bucket.
+const Sp<Node>
+Dht::findNode(const InfoHash& id, sa_family_t af) const
+{
+ if (const Bucket* b = findBucket(id, af))
+ for (const auto& n : b->nodes)
+ if (n->id == id) return n;
+ return {};
+}
+
+/* Every bucket caches the address of a likely node. Ping it. */
+// The actual send is delegated to the bucket; logging happens here only
+// when a cached candidate exists.
+void
+Dht::sendCachedPing(Bucket& b)
+{
+ if (b.cached)
+ DHT_LOG.d(b.cached->id, "[node %s] sending ping to cached node", b.cached->toString().c_str());
+ b.sendCachedPing(network_engine);
+}
+
+// Public addresses of this node as reported by other peers, most-reported
+// first. 'family' filters the result (0 keeps both families).
+std::vector<SockAddr>
+Dht::getPublicAddress(sa_family_t family)
+{
+ // Sort by report count, descending, so the most likely address is first.
+ std::sort(reported_addr.begin(), reported_addr.end(), [](const ReportedAddr& a, const ReportedAddr& b) {
+ return a.first > b.first;
+ });
+ std::vector<SockAddr> ret;
+ // Heuristic reserve: assume roughly half the entries per family.
+ ret.reserve(!family ? reported_addr.size() : reported_addr.size()/2);
+ for (const auto& addr : reported_addr)
+ if (!family || family == addr.second.getFamily())
+ ret.emplace_back(addr.second);
+ return ret;
+}
+
+// Offer 'node' to every ongoing search close to it in the keyspace.
+// Walks outward (forward then backward) from the closest search; each
+// direction stops at the first active (not expired, not done) search that
+// rejects the node. Returns whether any search accepted it.
+bool
+Dht::trySearchInsert(const Sp<Node>& node)
+{
+ const auto& now = scheduler.time();
+ if (not node) return false;
+
+ auto& srs = searches(node->getFamily());
+ auto closest = srs.lower_bound(node->id);
+ bool inserted {false};
+
+ // insert forward
+ for (auto it = closest; it != srs.end(); it++) {
+ auto& s = *it->second;
+ if (s.insertNode(node, now)) {
+ inserted = true;
+ // wake the search up so it can use the new node
+ scheduler.edit(s.nextSearchStep, now);
+ } else if (not s.expired and not s.done)
+ break;
+ }
+ // insert backward
+ for (auto it = closest; it != srs.begin();) {
+ --it;
+ auto& s = *it->second;
+ if (s.insertNode(node, now)) {
+ inserted = true;
+ scheduler.edit(s.nextSearchStep, now);
+ } else if (not s.expired and not s.done)
+ break;
+ }
+ return inserted;
+}
+
+void
+Dht::reportedAddr(const SockAddr& addr)
+{
+    // Tally how many peers reported each of our public addresses.
+    for (auto& reported : reported_addr) {
+        if (reported.second == addr) {
+            reported.first++;
+            return;
+        }
+    }
+    // Previously unseen address: track it, capping the table at 32 entries.
+    if (reported_addr.size() < 32)
+        reported_addr.emplace_back(1, addr);
+}
+
+/* We just learnt about a node, not necessarily a new one. Confirm is 1 if
+ the node sent a message, 2 if it sent us a reply. */
+void
+Dht::onNewNode(const Sp<Node>& node, int confirm)
+{
+ const auto& now = scheduler.time();
+ auto& b = buckets(node->getFamily());
+ // "Empty" heuristic: the table hasn't grown in the last 5 minutes and
+ // this is not a reply; used to trigger an early confirmation round.
+ auto wasEmpty = confirm < 2 && b.grow_time < now - std::chrono::minutes(5);
+ if (b.onNewNode(node, confirm, now, myid, network_engine) or confirm) {
+ trySearchInsert(node);
+ if (wasEmpty) {
+ scheduler.edit(nextNodesConfirmation, now + std::chrono::seconds(1));
+ }
+ }
+}
+
+/* Called periodically to purge known-bad nodes. Note that we're very
+ conservative here: broken nodes in the table don't do much harm, we'll
+ recover as soon as we find better ones. */
+void
+Dht::expireBuckets(RoutingTable& list)
+{
+    // Drop expired nodes from every bucket; whenever a bucket shrank,
+    // ping its cached replacement candidate to refill it.
+    for (auto& bucket : list) {
+        bool removed_some = false;
+        bucket.nodes.remove_if([&removed_some](const Sp<Node>& node) {
+            if (not node->isExpired())
+                return false;
+            removed_some = true;
+            return true;
+        });
+        if (removed_some)
+            sendCachedPing(bucket);
+    }
+}
+
+// Drop searches that are idle (no callbacks, announces or listeners) and
+// whose last step is older than SEARCH_EXPIRE_TIME, in both families.
+void
+Dht::expireSearches()
+{
+ auto t = scheduler.time() - SEARCH_EXPIRE_TIME;
+ auto expired = [&](std::pair<const InfoHash, Sp<Search>>& srp) {
+ auto& sr = *srp.second;
+ auto b = sr.callbacks.empty() && sr.announce.empty() && sr.listeners.empty() && sr.step_time < t;
+ if (b) {
+ DHT_LOG.d(srp.first, "[search %s] removing search", srp.first.toString().c_str());
+ // release the search's resources before erasing it from the map
+ sr.clear();
+ return b;
+ } else { return false; }
+ };
+ erase_if(searches4, expired);
+ erase_if(searches6, expired);
+}
+
+// Handle a successful 'get' reply from a search node: record the node
+// (and its token), cancel redundant pending gets that this answer already
+// satisfies, schedule the node's sync job, and dispatch the values.
+void
+Dht::searchNodeGetDone(const net::Request& req,
+ net::RequestAnswer&& answer,
+ std::weak_ptr<Search> ws,
+ Sp<Query> query)
+{
+ const auto& now = scheduler.time();
+ if (auto sr = ws.lock()) {
+ sr->insertNode(req.node, now, answer.ntoken);
+ if (auto srn = sr->getNode(req.node)) {
+ /* all other get requests which are satisfied by this answer
+ should not be sent anymore */
+ for (auto& g : sr->callbacks) {
+ auto& q = g.second.query;
+ if (q->isSatisfiedBy(*query) and q != query) {
+ // mark the satisfied query as done with a cancelled
+ // placeholder request
+ auto dummy_req = std::make_shared<net::Request>();
+ dummy_req->cancel();
+ srn->getStatus[q] = std::move(dummy_req);
+ }
+ }
+ auto syncTime = srn->getSyncTime(scheduler.time());
+ if (srn->syncJob)
+ scheduler.edit(srn->syncJob, syncTime);
+ else
+ srn->syncJob = scheduler.add(syncTime, std::bind(&Dht::searchStep, this, sr));
+ }
+ onGetValuesDone(req.node, answer, sr, query);
+ }
+}
+
+// Handle an expired/failed 'get' request: mark the node as a candidate
+// unless the request is definitely over, forget the query status when it
+// is, and wake the search up to make progress with other nodes.
+void
+Dht::searchNodeGetExpired(const net::Request& status,
+ bool over,
+ std::weak_ptr<Search> ws,
+ Sp<Query> query)
+{
+ if (auto sr = ws.lock()) {
+ if (auto srn = sr->getNode(status.node)) {
+ srn->candidate = not over;
+ if (over)
+ srn->getStatus.erase(query);
+ }
+ scheduler.edit(sr->nextSearchStep, scheduler.time());
+ }
+}
+
+// Paginated 'get': first request only the value ids matching the query
+// ("SELECT id"), then fetch each value individually by id. This keeps
+// responses small. Nodes that answer without fields (no pagination
+// support) fall back to the regular searchNodeGetDone path.
+void Dht::paginate(std::weak_ptr<Search> ws, Sp<Query> query, SearchNode* n) {
+ auto sr = ws.lock();
+ if (not sr) return;
+ // id-only projection, keeping the caller's 'where' clause
+ auto select_q = std::make_shared<Query>(Select {}.field(Value::Field::Id), query ? query->where : Where {});
+ auto onSelectDone = [this,ws,query](const net::Request& status,
+ net::RequestAnswer&& answer) mutable {
+ // Retrieve search
+ auto sr = ws.lock();
+ if (not sr) return;
+ const auto& id = sr->id;
+ // Retrieve search node
+ auto sn = sr->getNode(status.node);
+ if (not sn) return;
+ // backward compatibility
+ if (answer.fields.empty()) {
+ searchNodeGetDone(status, std::move(answer), ws, query);
+ return;
+ }
+ // one follow-up 'get' per announced value id
+ for (const auto& fvi : answer.fields) {
+ try {
+ auto vid = fvi->index.at(Value::Field::Id).getInt();
+ if (vid == Value::INVALID_ID) continue;
+ auto query_for_vid = std::make_shared<Query>(Select {}, Where {}.id(vid));
+ sn->pagination_queries[query].push_back(query_for_vid);
+ DHT_LOG.d(id, sn->node->id, "[search %s] [node %s] sending %s",
+ id.toString().c_str(), sn->node->toString().c_str(), query_for_vid->toString().c_str());
+ sn->getStatus[query_for_vid] = network_engine.sendGetValues(status.node,
+ id,
+ *query_for_vid,
+ -1,
+ std::bind(&Dht::searchNodeGetDone, this, _1, _2, ws, query),
+ std::bind(&Dht::searchNodeGetExpired, this, _1, _2, ws, query_for_vid)
+ );
+ } catch (const std::out_of_range&) {
+ DHT_LOG.e(id, sn->node->id, "[search %s] [node %s] received non-id field in response to "\
+ "'SELECT id' request...",
+ id.toString().c_str(), sn->node->toString().c_str());
+ }
+ }
+ };
+ /* add pagination query key for tracking ongoing requests. */
+ n->pagination_queries[query].push_back(select_q);
+
+ DHT_LOG.d(sr->id, n->node->id, "[search %s] [node %s] sending %s",
+ sr->id.toString().c_str(), n->node->toString().c_str(), select_q->toString().c_str());
+ n->getStatus[select_q] = network_engine.sendGetValues(n->node,
+ sr->id,
+ *select_q,
+ -1,
+ onSelectDone,
+ std::bind(&Dht::searchNodeGetExpired, this, _1, _2, ws, select_q)
+ );
+}
+
+// Send at most one 'find_node' or 'get' request for the search.
+// 'pn' (optional) is tried first as the target node; otherwise the first
+// eligible node in the search is used. When 'update' is set, only nodes
+// not contacted since the last 'get' for the query are eligible.
+// Returns the node a request was sent to, or nullptr when none was sent
+// (search done, solicitation limit reached, or no eligible node).
+Dht::SearchNode*
+Dht::searchSendGetValues(Sp<Search> sr, SearchNode* pn, bool update)
+{
+ if (sr->done or sr->currentlySolicitedNodeCount() >= MAX_REQUESTED_SEARCH_NODES)
+ return nullptr;
+
+ const auto& now = scheduler.time();
+
+ std::weak_ptr<Search> ws = sr;
+ auto cb = sr->callbacks.begin();
+ // catch-all query used when the search has no registered callbacks
+ static const auto ANY_QUERY = std::make_shared<Query>(Select {}, Where {}, true);
+ do { /* for all requests to send */
+ SearchNode* n = nullptr;
+ auto& query = sr->callbacks.empty() ? ANY_QUERY : cb->second.query;
+ const time_point up = (not sr->callbacks.empty() and update)
+ ? sr->getLastGetTime(*query)
+ : time_point::min();
+
+ if (pn and pn->canGet(now, up, query)) {
+ n = pn;
+ } else {
+ for (auto& sn : sr->nodes) {
+ if (sn.canGet(now, up, query)) {
+ n = &sn;
+ break;
+ }
+ }
+ }
+
+ if (sr->callbacks.empty()) { /* 'find_node' request */
+ if (not n)
+ return nullptr;
+
+ /*DHT_LOG.d(sr->id, n->node->id, "[search %s] [node %s] sending 'find_node'",
+ sr->id.toString().c_str(), n->node->toString().c_str());*/
+ n->getStatus[query] = network_engine.sendFindNode(n->node,
+ sr->id,
+ -1,
+ std::bind(&Dht::searchNodeGetDone, this, _1, _2, ws, query),
+ std::bind(&Dht::searchNodeGetExpired, this, _1, _2, ws, query));
+
+ } else { /* 'get' request */
+ if (not n)
+ // no node can serve this callback; try the next one
+ continue;
+
+ if (query and not query->select.getSelection().empty()) {
+ /* The request contains a select. No need to paginate... */
+ /*DHT_LOG.d(sr->id, n->node->id, "[search %s] [node %s] sending 'get'",
+ sr->id.toString().c_str(), n->node->toString().c_str());*/
+ n->getStatus[query] = network_engine.sendGetValues(n->node,
+ sr->id,
+ *query,
+ -1,
+ std::bind(&Dht::searchNodeGetDone, this, _1, _2, ws, query),
+ std::bind(&Dht::searchNodeGetExpired, this, _1, _2, ws, query));
+ } else
+ paginate(ws, query, n);
+ }
+
+ /* We only try to send one request. return. */
+ return n;
+
+ } while (++cb != sr->callbacks.end());
+
+ /* no request were sent */
+ return nullptr;
+}
+
+// Announce the search's pending values to its synced nodes.
+// Non-permanent values are 'put' directly. Permanent values are first
+// probed with a "SELECT id, seq" query so we can send a cheap 'refresh'
+// when the node already stores an up-to-date copy, and only 'put' when
+// it doesn't. At most TARGET_NODES non-candidate nodes are contacted.
+void Dht::searchSendAnnounceValue(const Sp<Search>& sr) {
+ if (sr->announce.empty())
+ return;
+ unsigned i = 0;
+ std::weak_ptr<Search> ws = sr;
+
+ auto onDone = [this,ws](const net::Request& req, net::RequestAnswer&& answer)
+ { /* when put done */
+ if (auto sr = ws.lock()) {
+ onAnnounceDone(req.node, answer, sr);
+ searchStep(sr);
+ }
+ };
+
+ auto onExpired = [this,ws](const net::Request&, bool over)
+ { /* when put expired */
+ if (over)
+ if (auto sr = ws.lock())
+ scheduler.edit(sr->nextSearchStep, scheduler.time());
+ };
+
+ auto onSelectDone =
+ [this,ws,onDone,onExpired](const net::Request& req, net::RequestAnswer&& answer) mutable
+ { /* on probing done */
+ auto sr = ws.lock();
+ if (not sr) return;
+ const auto& now = scheduler.time();
+ sr->insertNode(req.node, scheduler.time(), answer.ntoken);
+ auto sn = sr->getNode(req.node);
+ if (not sn) return;
+
+ if (not sn->isSynced(now)) {
+ /* Search is now unsynced. Let's call searchStep to sync again. */
+ scheduler.edit(sr->nextSearchStep, now);
+ return;
+ }
+ for (auto& a : sr->announce) {
+ if (sn->getAnnounceTime(a.value->id) > now)
+ continue;
+ // Did the probe report this value, and with which sequence number?
+ bool hasValue {false};
+ uint16_t seq_no = 0;
+ try {
+ const auto& f = std::find_if(answer.fields.cbegin(), answer.fields.cend(),
+ [&a](const Sp<FieldValueIndex>& i){
+ return i->index.at(Value::Field::Id).getInt() == a.value->id;
+ });
+ if (f != answer.fields.cend() and *f) {
+ hasValue = true;
+ seq_no = static_cast<uint16_t>((*f)->index.at(Value::Field::SeqNum).getInt());
+ }
+ } catch (std::out_of_range&) { }
+
+ auto next_refresh_time = now + getType(a.value->type).expiration;
+ /* only put the value if the node doesn't already have it */
+ if (not hasValue or seq_no < a.value->seq) {
+ DHT_LOG.d(sr->id, sn->node->id, "[search %s] [node %s] sending 'put' (vid: %d)",
+ sr->id.toString().c_str(), sn->node->toString().c_str(), a.value->id);
+ sn->acked[a.value->id] = std::make_pair(network_engine.sendAnnounceValue(sn->node,
+ sr->id,
+ a.value,
+ a.permanent ? time_point::max() : a.created,
+ sn->token,
+ onDone,
+ onExpired), next_refresh_time);
+ } else if (hasValue and a.permanent) {
+ // node already has the value: just extend its lifetime
+ DHT_LOG.w(sr->id, sn->node->id, "[search %s] [node %s] sending 'refresh' (vid: %d)",
+ sr->id.toString().c_str(), sn->node->toString().c_str(), a.value->id);
+ sn->acked[a.value->id] = std::make_pair(network_engine.sendRefreshValue(sn->node,
+ sr->id,
+ a.value->id,
+ sn->token,
+ onDone,
+ onExpired), next_refresh_time);
+ } else {
+ // non-permanent value already stored: record a synthetic ack
+ DHT_LOG.w(sr->id, sn->node->id, "[search %s] [node %s] already has value (vid: %d). Aborting.",
+ sr->id.toString().c_str(), sn->node->toString().c_str(), a.value->id);
+ auto ack_req = std::make_shared<net::Request>(net::Request::State::COMPLETED);
+ ack_req->reply_time = now;
+ sn->acked[a.value->id] = std::make_pair(std::move(ack_req), next_refresh_time);
+
+ /* step to clear announces */
+ scheduler.edit(sr->nextSearchStep, now);
+ }
+ if (a.permanent) {
+ // schedule the re-announce before the value expires
+ scheduler.add(next_refresh_time - REANNOUNCE_MARGIN, [this,ws] {
+ if (auto sr = ws.lock()) {
+ searchStep(sr);
+ }
+ });
+ }
+ }
+ };
+
+ Sp<Query> probe_query {};
+ const auto& now = scheduler.time();
+ for (auto& n : sr->nodes) {
+ if (not n.isSynced(now))
+ continue;
+
+ // skip nodes with a probe still in flight
+ const auto& gs = n.probe_query ? n.getStatus.find(n.probe_query) : n.getStatus.cend();
+ if (gs != n.getStatus.cend() and gs->second and gs->second->pending()) {
+ continue;
+ }
+
+ bool sendQuery = false;
+ for (auto& a : sr->announce) {
+ if (n.getAnnounceTime(a.value->id) <= now) {
+ if (a.permanent) {
+ // defer to the probe: refresh or put depending on the answer
+ sendQuery = true;
+ } else {
+ DHT_LOG.w(sr->id, n.node->id, "[search %s] [node %s] sending 'put' (vid: %d)",
+ sr->id.toString().c_str(), n.node->toString().c_str(), a.value->id);
+ n.acked[a.value->id] = {
+ network_engine.sendAnnounceValue(n.node, sr->id, a.value, a.created, n.token, onDone, onExpired),
+ now + getType(a.value->type).expiration
+ };
+ }
+ }
+ }
+
+ if (sendQuery) {
+ if (not probe_query)
+ probe_query = std::make_shared<Query>(Select {}.field(Value::Field::Id).field(Value::Field::SeqNum));
+ DHT_LOG.d(sr->id, n.node->id, "[search %s] [node %s] sending %s",
+ sr->id.toString().c_str(), n.node->toString().c_str(), probe_query->toString().c_str());
+ n.probe_query = probe_query;
+ n.getStatus[probe_query] = network_engine.sendGetValues(n.node,
+ sr->id,
+ *probe_query,
+ -1,
+ onSelectDone,
+ std::bind(&Dht::searchNodeGetExpired, this, _1, _2, ws, probe_query));
+ }
+ if (not n.candidate and ++i == TARGET_NODES)
+ break;
+ }
+}
+
/* Maintain 'listen' operations on a synced search node: for every listener of
 * the search whose listen on this node is due, (re)send a listen request and
 * set up the local callback plumbing (value forwarding + cache expiration).
 * Uses a weak_ptr to the search in every callback so a destroyed search
 * cancels all pending work. */
void
Dht::searchSynchedNodeListen(const Sp<Search>& sr, SearchNode& n)
{
    std::weak_ptr<Search> ws = sr;
    for (const auto& l : sr->listeners) {
        const auto& query = l.second.query;
        auto list_token = l.first;
        // Skip listeners whose listen on this node is still fresh.
        if (n.getListenTime(query) > scheduler.time())
            continue;
        // DHT_LOG.d(sr->id, n.node->id, "[search %s] [node %s] sending 'listen'",
        //        sr->id.toString().c_str(), n.node->toString().c_str());

        auto r = n.listenStatus.find(query);
        if (r == n.listenStatus.end()) {
            // First listen for this query on this node: create the cached
            // status with a callback forwarding received values to the
            // search listener (after applying its filter).
            r = n.listenStatus.emplace(query, SearchNode::CachedListenStatus{
                [ws,list_token](const std::vector<Sp<Value>>& values, bool expired){
                    if (auto sr = ws.lock()) {
                        auto l = sr->listeners.find(list_token);
                        if (l != sr->listeners.end()) {
                            l->second.get_cb(l->second.filter.filter(values), expired);
                        }
                    }
                }
            }).first;
            auto node = n.node;
            // Job (initially disabled: time_point::max()) that expires cached
            // values for this query on this node when rescheduled.
            r->second.cacheExpirationJob = scheduler.add(time_point::max(), [this,ws,query,node]{
                if (auto sr = ws.lock()) {
                    if (auto sn = sr->getNode(node)) {
                        sn->expireValues(query, scheduler);
                    }
                }
            });
        }
        // Pass the previous request (if any) so the engine can refresh it.
        auto prev_req = r != n.listenStatus.end() ? r->second.req : nullptr;
        auto new_req = network_engine.sendListen(n.node, sr->id, *query, n.token, prev_req,
            [this,ws,query](const net::Request& req, net::RequestAnswer&& answer) mutable
            { /* on done: schedule the next listen refresh and notify the search */
                if (auto sr = ws.lock()) {
                    scheduler.edit(sr->nextSearchStep, scheduler.time());
                    if (auto sn = sr->getNode(req.node))
                        scheduler.add(sn->getListenTime(query), std::bind(&Dht::searchStep, this, sr));
                    onListenDone(req.node, answer, sr);
                }
            },
            [this,ws,query](const net::Request& req, bool over) mutable
            { /* on request expired: drop the listen state when definitely over */
                if (auto sr = ws.lock()) {
                    scheduler.edit(sr->nextSearchStep, scheduler.time());
                    if (over)
                        if (auto sn = sr->getNode(req.node))
                            sn->listenStatus.erase(query);
                }
            },
            [this,ws,query](const Sp<Node>& node, net::RequestAnswer&& answer) mutable
            { /* on new values: feed them into the per-node value cache */
                if (auto sr = ws.lock()) {
                    scheduler.edit(sr->nextSearchStep, scheduler.time());
                    sr->insertNode(node, scheduler.time(), answer.ntoken);
                    if (auto sn = sr->getNode(node)) {
                        sn->onValues(query, std::move(answer), types, scheduler);
                    }
                }
            }
        );
        // Here the request may have failed and the CachedListenStatus removed
        r = n.listenStatus.find(query);
        if (r != n.listenStatus.end()) {
            r->second.req = new_req;
        }
    }
}
+
/* When a search is in progress, we periodically call search_step to send
   further requests. Drives gets, listens and announces for the search, and
   expires it when too many nodes stop responding. */
void
Dht::searchStep(Sp<Search> sr)
{
    // The search may have completed or expired since this step was scheduled.
    if (not sr or sr->expired or sr->done) return;

    const auto& now = scheduler.time();
    /*if (auto req_count = sr->currentlySolicitedNodeCount())
        DHT_LOG.d(sr->id, "[search %s IPv%c] step (%d requests)",
                sr->id.toString().c_str(), sr->af == AF_INET ? '4' : '6', req_count);*/
    sr->step_time = now;

    // Periodically refill the node list from the cache when too many
    // of the current nodes have gone bad.
    if (sr->refill_time + Node::NODE_EXPIRE_TIME < now and sr->nodes.size()-sr->getNumberOfBadNodes() < SEARCH_NODES)
        refill(*sr);

    /* Check if the first TARGET_NODES (8) live nodes have replied. */
    if (sr->isSynced(now)) {
        if (not (sr->callbacks.empty() and sr->announce.empty())) {
            // search is synced but some (newer) get operations are not complete
            // Call callbacks when done
            std::vector<Get> completed_gets;
            for (auto b = sr->callbacks.begin(); b != sr->callbacks.end();) {
                if (sr->isDone(b->second)) {
                    sr->setDone(b->second);
                    completed_gets.emplace_back(std::move(b->second));
                    b = sr->callbacks.erase(b);
                }
                else
                    ++b;
            }
            // clear corresponding queries
            for (const auto& get : completed_gets)
                for (auto& sn : sr->nodes) {
                    sn.getStatus.erase(get.query);
                    sn.pagination_queries.erase(get.query);
                }

            /* clearing callbacks for announced values */
            sr->checkAnnounced();

            if (sr->callbacks.empty() && sr->announce.empty() && sr->listeners.empty())
                sr->setDone();
        }

        // true if this node is part of the target nodes cluster.
        /*bool in = sr->id.xorCmp(myid, sr->nodes.back().node->id) < 0;

        DHT_LOG_DBG("[search %s IPv%c] synced%s",
                sr->id.toString().c_str(), sr->af == AF_INET ? '4' : '6', in ? ", in" : "");*/

        // Keep listen requests alive on (up to LISTEN_NODES) synced nodes.
        if (not sr->listeners.empty()) {
            unsigned i = 0;
            for (auto& n : sr->nodes) {
                if (not n.isSynced(now))
                    continue;
                searchSynchedNodeListen(sr, n);
                if (not n.candidate and ++i == LISTEN_NODES)
                    break;
            }
        }

        // Announce requests
        searchSendAnnounceValue(sr);

        if (sr->callbacks.empty() && sr->announce.empty() && sr->listeners.empty())
            sr->setDone();
    }

    // Keep up to MAX_REQUESTED_SEARCH_NODES 'get' requests in flight.
    while (sr->currentlySolicitedNodeCount() < MAX_REQUESTED_SEARCH_NODES and searchSendGetValues(sr));

    // Expire the search when enough consecutive nodes are unresponsive.
    if (sr->getNumberOfConsecutiveBadNodes() >= std::min(sr->nodes.size(),
                                             static_cast<size_t>(SEARCH_MAX_BAD_NODES)))
    {
        DHT_LOG.w(sr->id, "[search %s IPv%c] expired", sr->id.toString().c_str(), sr->af == AF_INET ? '4' : '6');
        sr->expire();
        connectivityChanged(sr->af);
    }

    /* dumpSearch(*sr, std::cout); */
}
+
+unsigned Dht::refill(Dht::Search& sr) {
+ const auto& now = scheduler.time();
+ sr.refill_time = now;
+ /* we search for up to SEARCH_NODES good nodes. */
+ auto cached_nodes = network_engine.getCachedNodes(sr.id, sr.af, SEARCH_NODES);
+
+ if (cached_nodes.empty()) {
+ DHT_LOG.e(sr.id, "[search %s IPv%c] no nodes from cache while refilling search",
+ sr.id.toString().c_str(), (sr.af == AF_INET) ? '4' : '6');
+ return 0;
+ }
+
+ unsigned inserted = 0;
+ for (auto& i : cached_nodes) {
+ /* try to insert the nodes. Search::insertNode will know how many to insert. */
+ if (sr.insertNode(i, now))
+ ++inserted;
+ }
+ DHT_LOG.d(sr.id, "[search %s IPv%c] refilled search with %u nodes from node cache",
+ sr.id.toString().c_str(), (sr.af == AF_INET) ? '4' : '6', inserted);
+ return inserted;
+}
+
+
+/* Start a search. */
+Sp<Dht::Search>
+Dht::search(const InfoHash& id, sa_family_t af, GetCallback gcb, QueryCallback qcb, DoneCallback dcb, Value::Filter f, const Sp<Query>& q)
+{
+ if (!isRunning(af)) {
+ DHT_LOG.e(id, "[search %s IPv%c] unsupported protocol", id.toString().c_str(), (af == AF_INET) ? '4' : '6');
+ if (dcb)
+ dcb(false, {});
+ return {};
+ }
+
+ auto& srs = searches(af);
+ const auto& srp = srs.find(id);
+ Sp<Search> sr {};
+
+ if (srp != srs.end()) {
+ sr = srp->second;
+ sr->done = false;
+ sr->expired = false;
+ } else {
+ if (searches4.size() + searches6.size() < MAX_SEARCHES) {
+ sr = std::make_shared<Search>();
+ srs.emplace(id, sr);
+ } else {
+ for (auto it = srs.begin(); it!=srs.end();) {
+ auto& s = *it->second;
+ if ((s.done or s.expired) and s.announce.empty() and s.listeners.empty()) {
+ sr = it->second;
+ break;
+ }
+ }
+ if (not sr) {
+ DHT_LOG.e(id, "[search %s IPv%c] maximum number of searches reached !", id.toString().c_str(), (af == AF_INET) ? '4' : '6');
+ return {};
+ }
+ }
+ sr->af = af;
+ sr->tid = search_id++;
+ sr->step_time = time_point::min();
+ sr->id = id;
+ sr->done = false;
+ sr->expired = false;
+ sr->nodes.clear();
+ sr->nodes.reserve(SEARCH_NODES+1);
+ sr->nextSearchStep = scheduler.add(time_point::max(), std::bind(&Dht::searchStep, this, sr));
+ DHT_LOG.w(id, "[search %s IPv%c] new search", id.toString().c_str(), (af == AF_INET) ? '4' : '6');
+ if (search_id == 0)
+ search_id++;
+ }
+
+ sr->get(f, q, qcb, gcb, dcb, scheduler);
+ refill(*sr);
+
+ return sr;
+}
+
/* Register (or refresh) an announce of 'value' under hash 'id' on one address
 * family. Creates the search if needed, then schedules an immediate search
 * step so the value gets sent. With 'permanent', the value is periodically
 * re-announced. 'callback' receives the announce result. */
void
Dht::announce(const InfoHash& id,
        sa_family_t af,
        Sp<Value> value,
        DoneCallback callback,
        time_point created,
        bool permanent)
{
    auto& srs = searches(af);
    auto srp = srs.find(id);
    auto sr = srp == srs.end() ? search(id, af) : srp->second;
    if (!sr) {
        // Search could not be created (protocol not running, or too many searches).
        if (callback)
            callback(false, {});
        return;
    }
    sr->done = false;
    sr->expired = false;
    auto a_sr = std::find_if(sr->announce.begin(), sr->announce.end(), [&](const Announce& a){
        return a.value->id == value->id;
    });
    if (a_sr == sr->announce.end()) {
        // First announce of this value id on this search.
        sr->announce.emplace_back(Announce {permanent, value, created, callback});
        for (auto& n : sr->nodes) {
            // Reset per-node probe/ack state so the value gets (re)sent.
            n.probe_query.reset();
            n.acked[value->id].first.reset();
        }
    } else {
        a_sr->permanent = permanent;
        a_sr->created = created;
        if (a_sr->value != value) {
            // Value object changed: force a re-announce on every node.
            a_sr->value = value;
            for (auto& n : sr->nodes) {
                n.acked[value->id].first.reset();
                n.probe_query.reset();
            }
        }
        if (sr->isAnnounced(value->id)) {
            // Already announced (per Search::isAnnounced): complete both the
            // previous and the new callback immediately with success.
            if (a_sr->callback)
                a_sr->callback(true, {});
            a_sr->callback = {};
            if (callback)
                callback(true, {});
            return;
        } else {
            // Replace the pending callback, failing the previous one.
            if (a_sr->callback)
                a_sr->callback(false, {});
            a_sr->callback = callback;
        }
    }
    scheduler.edit(sr->nextSearchStep, scheduler.time());
}
+
+size_t
+Dht::listenTo(const InfoHash& id, sa_family_t af, ValueCallback cb, Value::Filter f, const Sp<Query>& q)
+{
+ if (!isRunning(af))
+ return 0;
+ // DHT_LOG_ERR("[search %s IPv%c] search_time is now in %lfs", sr->id.toString().c_str(), (sr->af == AF_INET) ? '4' : '6', print_dt(tm-clock::now()));
+
+ //DHT_LOG_WARN("listenTo %s", id.toString().c_str());
+ auto& srs = searches(af);
+ auto srp = srs.find(id);
+ Sp<Search> sr = (srp == srs.end()) ? search(id, af) : srp->second;
+ if (!sr)
+ throw DhtException("Can't create search");
+ DHT_LOG.e(id, "[search %s IPv%c] listen", id.toString().c_str(), (af == AF_INET) ? '4' : '6');
+ return sr->listen(cb, f, q, scheduler);
+}
+
/* Public listen entry point: listen for values matching 'where' under 'id'.
 * Registers a local (storage) listener plus a network listener on each
 * address family, and returns an opaque token for cancelListen().
 * Returns 0 on failure (or if the callback aborts on the initial values). */
size_t
Dht::listen(const InfoHash& id, ValueCallback cb, Value::Filter f, Where where)
{
    scheduler.syncTime();

    Query q {{}, where};
    // NOTE(review): 'vals' is filled below but not read afterwards in this
    // function — it only keeps the shared_ptr'd values alive; confirm intent.
    auto vals = std::make_shared<std::map<Value::Id, Sp<Value>>>();
    auto token = ++listener_token;

    // Cache wrapper around the user callback; the cancel closure tears the
    // whole listen down when the cache decides to stop.
    auto gcb = OpValueCache::cacheCallback(std::move(cb), [this, id, token]{
        cancelListen(id, token);
    });

    auto query = std::make_shared<Query>(q);
    auto filter = f.chain(q.where.getFilter());
    size_t tokenlocal = 0;
    auto st = store.find(id);
    if (st == store.end() && store.size() < MAX_HASHES)
        st = store.emplace(id, scheduler.time() + MAX_STORAGE_MAINTENANCE_EXPIRE_TIME).first;
    if (st != store.end()) {
        if (not st->second.empty()) {
            // Deliver currently stored matching values right away.
            std::vector<Sp<Value>> newvals = st->second.get(filter);
            if (not newvals.empty()) {
                // A false return from the callback aborts the listen.
                if (!gcb(newvals, false))
                    return 0;
                for (const auto& v : newvals) {
                    auto it = vals->emplace(v->id, v);
                    if (not it.second)
                        it.first->second = v;
                }
            }
        }
        tokenlocal = ++st->second.listener_token;
        st->second.local_listeners.emplace(tokenlocal, LocalListener{query, filter, gcb});
    }

    // Network listeners: IPv6 is only attempted if IPv4 succeeded; a failure
    // on either family rolls back the local listener and fails the listen.
    auto token4 = Dht::listenTo(id, AF_INET, gcb, filter, query);
    auto token6 = token4 == 0 ? 0 : Dht::listenTo(id, AF_INET6, gcb, filter, query);
    if (token6 == 0 && st != store.end()) {
        st->second.local_listeners.erase(tokenlocal);
        return 0;
    }

    listeners.emplace(token, std::make_tuple(tokenlocal, token4, token6));
    return token;
}
+
+bool
+Dht::cancelListen(const InfoHash& id, size_t token)
+{
+ scheduler.syncTime();
+
+ auto it = listeners.find(token);
+ if (it == listeners.end()) {
+ DHT_LOG.w(id, "Listen token not found: %d", token);
+ return false;
+ }
+ DHT_LOG.d(id, "cancelListen %s with token %d", id.toString().c_str(), token);
+ auto st = store.find(id);
+ auto tokenlocal = std::get<0>(it->second);
+ if (st != store.end() && tokenlocal)
+ st->second.local_listeners.erase(tokenlocal);
+
+ auto searches_cancel_listen = [this,&id](std::map<InfoHash, Sp<Search>>& srs, size_t token) {
+ auto srp = srs.find(id);
+ if (srp != srs.end() and token)
+ srp->second->cancelListen(token, scheduler);
+ };
+ searches_cancel_listen(searches4, std::get<1>(it->second));
+ searches_cancel_listen(searches6, std::get<2>(it->second));
+ listeners.erase(it);
+ return true;
+}
+
/* Aggregated completion state for an operation running on both address families. */
struct OpStatus {
    struct Status {
        bool done {false}; // the (sub-)operation has completed
        bool ok {false};   // the (sub-)operation succeeded
        Status(bool done=false, bool ok=false) : done(done), ok(ok) {}
    };
    Status status;  // overall, user-facing status
    Status status4; // IPv4 sub-operation status
    Status status6; // IPv6 sub-operation status
};
+
/* Operation status extended with the results accumulated across networks. */
template <typename T>
struct GetStatus : public OpStatus {
    std::vector<Sp<T>> values;   // deduplicated values/fields collected so far
    std::vector<Sp<Node>> nodes; // nodes reported by the done callbacks
};
+
/* Put 'val' under hash 'id': store it locally, then announce it on both
 * address families. 'callback' fires once both announces have completed;
 * success if either family succeeded. With 'permanent', the value is kept
 * and re-announced until cancelled. */
void
Dht::put(const InfoHash& id, Sp<Value> val, DoneCallback callback, time_point created, bool permanent)
{
    if (not val) {
        if (callback)
            callback(false, {});
        return;
    }
    // Assign a random value id when the caller didn't provide one.
    if (val->id == Value::INVALID_ID) {
        crypto::random_device rdev;
        std::uniform_int_distribution<Value::Id> rand_id {};
        val->id = rand_id(rdev);
    }
    scheduler.syncTime();
    const auto& now = scheduler.time();
    created = std::min(now, created);
    // Store locally first so local gets/listens see the value immediately.
    storageStore(id, val, created, {}, permanent);

    DHT_LOG.d(id, "put: adding %s -> %s", id.toString().c_str(), val->toString().c_str());

    auto op = std::make_shared<OpStatus>();
    auto donecb = [callback](const std::vector<Sp<Node>>& nodes, OpStatus& op) {
        // Fire the user callback once BOTH the IPv4 and IPv6 announces have
        // completed; report success if either one succeeded.
        if (callback and not op.status.done and (op.status4.done && op.status6.done)) {
            callback(op.status4.ok or op.status6.ok, nodes);
            op.status.done = true;
        }
    };
    announce(id, AF_INET, val, [=](bool ok4, const std::vector<Sp<Node>>& nodes) {
        DHT_LOG.d(id, "Announce done IPv4 %d", ok4);
        auto& o = *op;
        o.status4 = {true, ok4};
        donecb(nodes, o);
    }, created, permanent);
    announce(id, AF_INET6, val, [=](bool ok6, const std::vector<Sp<Node>>& nodes) {
        DHT_LOG.d(id, "Announce done IPv6 %d", ok6);
        auto& o = *op;
        o.status6 = {true, ok6};
        donecb(nodes, o);
    }, created, permanent);
}
+
+template <typename T>
+void doneCallbackWrapper(DoneCallback dcb, const std::vector<Sp<Node>>& nodes, GetStatus<T>& op) {
+ if (op.status.done)
+ return;
+ op.nodes.insert(op.nodes.end(), nodes.begin(), nodes.end());
+ if (op.status.ok or (op.status4.done and op.status6.done)) {
+ bool ok = op.status.ok or op.status4.ok or op.status6.ok;
+ op.status.done = true;
+ if (dcb)
+ dcb(ok, op.nodes);
+ }
+}
+
+template <typename T, typename Cb>
+bool callbackWrapper(Cb get_cb,
+ DoneCallback done_cb,
+ const std::vector<Sp<T>>& values,
+ std::function<std::vector<Sp<T>>(const std::vector<Sp<T>>&)> add_values,
+ Sp<GetStatus<T>> o)
+{
+ auto& op = *o;
+ if (op.status.done)
+ return false;
+ auto newvals = add_values(values);
+ if (not newvals.empty()) {
+ op.status.ok = !get_cb(newvals);
+ op.values.insert(op.values.end(), newvals.begin(), newvals.end());
+ }
+ doneCallbackWrapper(done_cb, {}, op);
+ return !op.status.ok;
+}
+
/* Get values under 'id' matching 'filter'/'where', first from local storage,
 * then from the network on both address families. 'getcb' receives batches of
 * new (deduplicated) values; 'donecb' fires when the operation completes. */
void
Dht::get(const InfoHash& id, GetCallback getcb, DoneCallback donecb, Value::Filter&& filter, Where&& where)
{
    scheduler.syncTime();

    auto q = std::make_shared<Query>(Select {}, std::move(where));
    auto op = std::make_shared<GetStatus<Value>>();
    auto f = filter.chain(q->where.getFilter());

    // Keep only values not yet delivered (by pointer or by content equality)
    // that pass the combined filter.
    auto add_values = [op,f](const std::vector<Sp<Value>>& values) {
        std::vector<Sp<Value>> newvals {};
        for (const auto& v : values) {
            auto it = std::find_if(op->values.cbegin(), op->values.cend(), [&](const Sp<Value>& sv) {
                return sv == v or *sv == *v;
            });
            if (it == op->values.cend()) {
                if (not f or f(*v))
                    newvals.push_back(v);
            }
        }
        return newvals;
    };
    auto gcb = std::bind(callbackWrapper<Value, GetCallback>, getcb, donecb, _1, add_values, op);

    /* Try to answer this search locally. */
    gcb(getLocal(id, f));

    Dht::search(id, AF_INET, gcb, {}, [=](bool ok, const std::vector<Sp<Node>>& nodes) {
        //DHT_LOG_WARN("DHT done IPv4");
        op->status4 = {true, ok};
        doneCallbackWrapper(donecb, nodes, *op);
    }, f, q);
    Dht::search(id, AF_INET6, gcb, {}, [=](bool ok, const std::vector<Sp<Node>>& nodes) {
        //DHT_LOG_WARN("DHT done IPv6");
        op->status6 = {true, ok};
        doneCallbackWrapper(donecb, nodes, *op);
    }, f, q);
}
+
/* Run a field query under 'id': like get(), but returns selected value fields
 * (FieldValueIndex) instead of full values. Answers locally first, then
 * searches both address families. */
void Dht::query(const InfoHash& id, QueryCallback cb, DoneCallback done_cb, Query&& q)
{
    scheduler.syncTime();
    auto op = std::make_shared<GetStatus<FieldValueIndex>>();

    auto f = q.where.getFilter();
    auto values = getLocal(id, f);
    // Keep only field sets not already delivered; a new field set that
    // strictly contains a previously delivered one replaces it.
    auto add_fields = [=](const std::vector<Sp<FieldValueIndex>>& fields) {
        std::vector<Sp<FieldValueIndex>> newvals {};
        for (const auto& f : fields) {
            auto it = std::find_if(op->values.cbegin(), op->values.cend(),
                [&](const Sp<FieldValueIndex>& sf) {
                    return sf == f or f->containedIn(*sf);
                });
            if (it == op->values.cend()) {
                auto lesser = std::find_if(op->values.begin(), op->values.end(),
                    [&](const Sp<FieldValueIndex>& sf) {
                        return sf->containedIn(*f);
                    });
                if (lesser != op->values.end())
                    op->values.erase(lesser);
                newvals.push_back(f);
            }
        }
        return newvals;
    };
    // Project locally stored values onto the requested field selection.
    std::vector<Sp<FieldValueIndex>> local_fields(values.size());
    std::transform(values.begin(), values.end(), local_fields.begin(), [&q](const Sp<Value>& v) {
        return std::make_shared<FieldValueIndex>(*v, q.select);
    });
    auto qcb = std::bind(callbackWrapper<FieldValueIndex, QueryCallback>, cb, done_cb, _1, add_fields, op);

    /* Try to answer this search locally. */
    qcb(local_fields);

    auto sq = std::make_shared<Query>(std::move(q));
    Dht::search(id, AF_INET, {}, qcb, [=](bool ok, const std::vector<Sp<Node>>& nodes) {
        //DHT_LOG_WARN("DHT done IPv4");
        op->status4 = {true, ok};
        doneCallbackWrapper(done_cb, nodes, *op);
    }, f, sq);
    Dht::search(id, AF_INET6, {}, qcb, [=](bool ok, const std::vector<Sp<Node>>& nodes) {
        //DHT_LOG_WARN("DHT done IPv6");
        op->status6 = {true, ok};
        doneCallbackWrapper(done_cb, nodes, *op);
    }, f, sq);
}
+
+std::vector<Sp<Value>>
+Dht::getLocal(const InfoHash& id, Value::Filter f) const
+{
+ auto s = store.find(id);
+ if (s == store.end()) return {};
+ return s->second.get(f);
+}
+
+Sp<Value>
+Dht::getLocalById(const InfoHash& id, Value::Id vid) const
+{
+ auto s = store.find(id);
+ if (s != store.end())
+ return s->second.getById(vid);
+ return {};
+}
+
+std::vector<Sp<Value>>
+Dht::getPut(const InfoHash& id)
+{
+ std::vector<Sp<Value>> ret;
+ auto find_values = [&](std::map<InfoHash, Sp<Search>> srs) {
+ auto srp = srs.find(id);
+ if (srp == srs.end())
+ return;
+ auto& search = srp->second;
+ ret.reserve(ret.size() + search->announce.size());
+ for (const auto& a : search->announce)
+ ret.push_back(a.value);
+ };
+ find_values(searches4);
+ find_values(searches6);
+ return ret;
+}
+
+Sp<Value>
+Dht::getPut(const InfoHash& id, const Value::Id& vid)
+{
+ auto find_value = [&](std::map<InfoHash, Sp<Search>> srs) {
+ auto srp = srs.find(id);
+ if (srp == srs.end())
+ return Sp<Value> {};
+ auto& search = srp->second;
+ for (auto& a : search->announce) {
+ if (a.value->id == vid)
+ return a.value;
+ }
+ return Sp<Value> {};
+ };
+ if (auto v4 = find_value(searches4))
+ return v4;
+ if (auto v6 = find_value(searches6))
+ return v6;
+ return {};
+}
+
+bool
+Dht::cancelPut(const InfoHash& id, const Value::Id& vid)
+{
+ bool canceled {false};
+ if (storageErase(id, vid))
+ canceled = true;
+ auto sr_cancel_put = [&](std::map<InfoHash, Sp<Search>> srs) {
+ auto srp = srs.find(id);
+ if (srp == srs.end())
+ return;
+
+ auto& sr = srp->second;
+ for (auto it = sr->announce.begin(); it != sr->announce.end();) {
+ if (it->value->id == vid) {
+ canceled = true;
+ it = sr->announce.erase(it);
+ }
+ else
+ ++it;
+ }
+ };
+ sr_cancel_put(searches4);
+ sr_cancel_put(searches6);
+ return canceled;
+}
+
+
// Storage

/* Notify interested parties that storage entry 'st' under 'id' changed:
 * 'v' was added (newValue) or updated. Local listeners get a direct callback;
 * remote listeners get an update message through the network engine. */
void
Dht::storageChanged(const InfoHash& id, Storage& st, ValueStorage& v, bool newValue)
{
    if (newValue) {
        if (not st.local_listeners.empty()) {
            DHT_LOG.d(id, "[store %s] %lu local listeners", id.toString().c_str(), st.local_listeners.size());
            std::vector<std::pair<ValueCallback, std::vector<Sp<Value>>>> cbs;
            for (const auto& l : st.local_listeners) {
                std::vector<Sp<Value>> vals;
                // Only deliver values passing the listener's filter.
                if (not l.second.filter or l.second.filter(*v.data))
                    vals.push_back(v.data);
                if (not vals.empty()) {
                    DHT_LOG.d(id, "[store %s] sending update local listener with token %lu",
                            id.toString().c_str(),
                            l.first);
                    cbs.emplace_back(l.second.get_cb, std::move(vals));
                }
            }
            // listeners are copied: they may be deleted by the callback
            for (auto& cb : cbs)
                cb.first(cb.second, false);
        }
    }

    if (not st.listeners.empty()) {
        DHT_LOG.d(id, "[store %s] %lu remote listeners", id.toString().c_str(), st.listeners.size());
        for (const auto& node_listeners : st.listeners) {
            for (const auto& l : node_listeners.second) {
                // Skip listeners whose query filter rejects the value.
                auto f = l.second.query.where.getFilter();
                if (f and not f(*v.data))
                    continue;
                DHT_LOG.w(id, node_listeners.first->id, "[store %s] [node %s] sending update",
                        id.toString().c_str(),
                        node_listeners.first->toString().c_str());
                std::vector<Sp<Value>> vals {};
                vals.push_back(v.data);
                // A fresh token is generated per notification.
                Blob ntoken = makeToken(node_listeners.first->getAddr(), false);
                network_engine.tellListener(node_listeners.first, l.first, id, 0, ntoken, {}, {},
                        std::move(vals), l.second.query);
            }
        }
    }
}
+
+bool
+Dht::storageStore(const InfoHash& id, const Sp<Value>& value, time_point created, const SockAddr& sa, bool permanent)
+{
+ const auto& now = scheduler.time();
+ created = std::min(created, now);
+ auto expiration = permanent ? time_point::max() : created + getType(value->type).expiration;
+ if (expiration < now)
+ return false;
+
+ auto st = store.find(id);
+ if (st == store.end()) {
+ if (store.size() >= MAX_HASHES)
+ return false;
+ auto st_i = store.emplace(id, now);
+ st = st_i.first;
+ if (maintain_storage and st_i.second)
+ scheduler.add(st->second.maintenance_time, std::bind(&Dht::dataPersistence, this, id));
+ }
+
+ StorageBucket* store_bucket {nullptr};
+ if (sa)
+ store_bucket = &store_quota.emplace(sa, StorageBucket{}).first->second;
+
+ auto store = st->second.store(id, value, created, expiration, store_bucket);
+ if (auto vs = store.first) {
+ total_store_size += store.second.size_diff;
+ total_values += store.second.values_diff;
+ if (not permanent) {
+ scheduler.add(expiration, std::bind(&Dht::expireStorage, this, id));
+ }
+ if (total_store_size > max_store_size) {
+ expireStore();
+ }
+ storageChanged(id, st->second, *vs, store.second.values_diff > 0);
+ }
+
+ return std::get<0>(store);
+}
+
+bool
+Dht::storageErase(const InfoHash& id, Value::Id vid)
+{
+ auto st = store.find(id);
+ if (st == store.end())
+ return false;
+ auto ret = st->second.remove(id, vid);
+ total_store_size += ret.size_diff;
+ total_values += ret.values_diff;
+ return ret.values_diff;
+}
+
/* Register (or refresh) a remote listener from 'node' on hash 'id'.
 * On first registration, currently stored matching values are sent right
 * away; on refresh, only the listener's expiry/query are updated. */
void
Dht::storageAddListener(const InfoHash& id, const Sp<Node>& node, size_t socket_id, Query&& query)
{
    const auto& now = scheduler.time();
    auto st = store.find(id);
    if (st == store.end()) {
        // Create an (empty) storage entry to hang the listener on.
        if (store.size() >= MAX_HASHES)
            return;
        st = store.emplace(id, now).first;
    }
    auto node_listeners = st->second.listeners.emplace(node, std::map<size_t, Listener> {}).first;
    auto l = node_listeners->second.find(socket_id);
    if (l == node_listeners->second.end()) {
        // New listener: push the currently stored matching values first.
        auto vals = st->second.get(query.where.getFilter());
        if (not vals.empty()) {
            network_engine.tellListener(node, socket_id, id, WANT4 | WANT6, makeToken(node->getAddr(), false),
                    buckets4.findClosestNodes(id, now, TARGET_NODES), buckets6.findClosestNodes(id, now, TARGET_NODES),
                    std::move(vals), query);
        }
        node_listeners->second.emplace(socket_id, Listener {now, std::forward<Query>(query)});
    }
    else
        l->second.refresh(now, std::forward<Query>(query));
}
+
/* Expire values of one storage entry: removes values past their expiration,
 * updates the global accounting, and notifies remote and local listeners of
 * the expired value ids. */
void
Dht::expireStore(decltype(store)::iterator i)
{
    const auto& id = i->first;
    auto& st = i->second;
    // stats.first: (negative) size delta; stats.second: list of expired values.
    auto stats = st.expire(id, scheduler.time());
    total_store_size += stats.first;
    total_values -= stats.second.size();
    if (not stats.second.empty()) {
        DHT_LOG.d(id, "[store %s] discarded %ld expired values (%ld bytes)",
            id.toString().c_str(), stats.second.size(), -stats.first);

        if (not st.listeners.empty()) {
            DHT_LOG.d(id, "[store %s] %lu remote listeners", id.toString().c_str(), st.listeners.size());

            std::vector<Value::Id> ids;
            ids.reserve(stats.second.size());
            for (const auto& v : stats.second)
                ids.emplace_back(v->id);

            // Tell every remote listener which value ids just expired.
            for (const auto& node_listeners : st.listeners) {
                for (const auto& l : node_listeners.second) {
                    DHT_LOG.w(id, node_listeners.first->id, "[store %s] [node %s] sending expired",
                            id.toString().c_str(),
                            node_listeners.first->toString().c_str());
                    Blob ntoken = makeToken(node_listeners.first->getAddr(), false);
                    network_engine.tellListenerExpired(node_listeners.first, l.first, id, ntoken, ids);
                }
            }
        }
        // Local listeners receive the expired values with expired=true.
        for (const auto& local_listeners : st.local_listeners) {
            local_listeners.second.get_cb(stats.second, true);
        }
    }
}
+
+void
+Dht::expireStorage(InfoHash h)
+{
+ auto i = store.find(h);
+ if (i != store.end())
+ expireStore(i);
+}
+
+void
+Dht::expireStore()
+{
+ // removing expired values
+ for (auto i = store.begin(); i != store.end();) {
+ expireStore(i);
+
+ if (i->second.empty() && i->second.listeners.empty() && i->second.local_listeners.empty()) {
+ DHT_LOG.d(i->first, "[store %s] discarding empty storage", i->first.toString().c_str());
+ i = store.erase(i);
+ }
+ else
+ ++i;
+ }
+
+ // remove more values if storage limit is exceeded
+ while (total_store_size > max_store_size) {
+ // find IP using the most storage
+ if (store_quota.empty()) {
+ DHT_LOG.w("No space left: local data consumes all the quota!");
+ break;
+ }
+ auto largest = store_quota.begin();
+ for (auto it = ++largest; it != store_quota.end(); ++it) {
+ if (it->second.size() > largest->second.size())
+ largest = it;
+ }
+ DHT_LOG.w("No space left: discarding value of largest consumer %s", largest->first.toString().c_str());
+ while (true) {
+ auto exp_value = largest->second.getOldest();
+ auto storage = store.find(exp_value.first);
+ if (storage != store.end()) {
+ auto ret = storage->second.remove(exp_value.first, exp_value.second);
+ total_store_size += ret.size_diff;
+ total_values += ret.values_diff;
+ DHT_LOG.w("Discarded %ld bytes, still %ld used", largest->first.toString().c_str(), total_store_size);
+ if (ret.values_diff)
+ break;
+ }
+ }
+ }
+
+ // remove unused quota entires
+ for (auto i = store_quota.begin(); i != store_quota.end();) {
+ if (i->second.size() == 0)
+ i = store_quota.erase(i);
+ else
+ ++i;
+ }
+}
+
+void
+Dht::connectivityChanged(sa_family_t af)
+{
+ const auto& now = scheduler.time();
+ scheduler.edit(nextNodesConfirmation, now);
+ buckets(af).connectivityChanged(now);
+ network_engine.connectivityChanged(af);
+ reported_addr.erase(std::remove_if(reported_addr.begin(), reported_addr.end(), [&](const ReportedAddr& addr){
+ return addr.second.getFamily() == af;
+ }), reported_addr.end());
+}
+
+void
+Dht::rotateSecrets()
+{
+ oldsecret = secret;
+ {
+ crypto::random_device rdev;
+ secret = std::uniform_int_distribution<uint64_t>{}(rdev);
+ }
+ uniform_duration_distribution<> time_dist(std::chrono::minutes(15), std::chrono::minutes(45));
+ auto rotate_secrets_time = scheduler.time() + time_dist(rd);
+ scheduler.add(rotate_secrets_time, std::bind(&Dht::rotateSecrets, this));
+}
+
+Blob
+Dht::makeToken(const SockAddr& addr, bool old) const
+{
+ const void *ip;
+ size_t iplen;
+ in_port_t port;
+
+ auto family = addr.getFamily();
+ if (family == AF_INET) {
+ const auto& sin = addr.getIPv4();
+ ip = &sin.sin_addr;
+ iplen = 4;
+ port = sin.sin_port;
+ } else if (family == AF_INET6) {
+ const auto& sin6 = addr.getIPv6();
+ ip = &sin6.sin6_addr;
+ iplen = 16;
+ port = sin6.sin6_port;
+ } else {
+ return {};
+ }
+
+ const auto& c1 = old ? oldsecret : secret;
+ Blob data;
+ data.reserve(sizeof(secret)+sizeof(in_port_t)+iplen);
+ data.insert(data.end(), (uint8_t*)&c1, ((uint8_t*)&c1) + sizeof(c1));
+ data.insert(data.end(), (uint8_t*)ip, (uint8_t*)ip+iplen);
+ data.insert(data.end(), (uint8_t*)&port, ((uint8_t*)&port)+sizeof(in_port_t));
+ return crypto::hash(data, TOKEN_SIZE);
+}
+
+bool
+Dht::tokenMatch(const Blob& token, const SockAddr& addr) const
+{
+ if (not addr or token.size() != TOKEN_SIZE)
+ return false;
+ if (token == makeToken(addr, false))
+ return true;
+ if (token == makeToken(addr, true))
+ return true;
+ return false;
+}
+
+NodeStats
+Dht::getNodesStats(sa_family_t af) const
+{
+ NodeStats stats {};
+ const auto& now = scheduler.time();
+ const auto& bcks = buckets(af);
+ for (const auto& b : bcks) {
+ for (auto& n : b.nodes) {
+ if (n->isGood(now)) {
+ stats.good_nodes++;
+ if (n->isIncoming())
+ stats.incoming_nodes++;
+ } else if (not n->isExpired())
+ stats.dubious_nodes++;
+ }
+ if (b.cached)
+ stats.cached_nodes++;
+ }
+ stats.table_depth = bcks.depth(bcks.findBucket(myid));
+ return stats;
+}
+
+void
+Dht::dumpBucket(const Bucket& b, std::ostream& out) const
+{
+ const auto& now = scheduler.time();
+ using namespace std::chrono;
+ out << b.first << " count " << b.nodes.size() << " age " << duration_cast<seconds>(now - b.time).count() << " sec";
+ if (b.cached)
+ out << " (cached)";
+ out << std::endl;
+ for (auto& n : b.nodes) {
+ out << " Node " << n->toString();
+ const auto& t = n->getTime();
+ const auto& r = n->getReplyTime();
+ if (t != r)
+ out << " age " << duration_cast<seconds>(now - t).count() << ", reply: " << duration_cast<seconds>(now - r).count();
+ else
+ out << " age " << duration_cast<seconds>(now - t).count();
+ if (n->isExpired())
+ out << " [expired]";
+ else if (n->isGood(now))
+ out << " [good]";
+ out << std::endl;
+ }
+}
+
/* Print a human-readable description of a search to 'out': overall state,
 * pending queries, announces, and a per-node table of get/listen/announce
 * status flags. Debug aid only; output format is not a stable interface. */
void
Dht::dumpSearch(const Search& sr, std::ostream& out) const
{
    const auto& now = scheduler.time();
    using namespace std::chrono;
    out << std::endl << "Search IPv" << (sr.af == AF_INET6 ? '6' : '4') << ' ' << sr.id << " gets: " << sr.callbacks.size();
    out << ", age: " << duration_cast<seconds>(now - sr.step_time).count() << " s";
    if (sr.done)
        out << " [done]";
    if (sr.expired)
        out << " [expired]";
    bool synced = sr.isSynced(now);
    out << (synced ? " [synced]" : " [not synced]");
    if (synced && sr.isListening(now))
        out << " [listening]";
    out << std::endl;

    /*printing the queries*/
    if (sr.callbacks.size() + sr.listeners.size() > 0)
        out << "Queries:" << std::endl;
    for (const auto& cb : sr.callbacks) {
        out << *cb.second.query << std::endl;
    }
    for (const auto& l : sr.listeners) {
        out << *l.second.query << std::endl;
    }

    for (const auto& a : sr.announce) {
        bool announced = sr.isAnnounced(a.value->id);
        out << "Announcement: " << *a.value << (announced ? " [announced]" : "") << std::endl;
    }

    out << " Common bits InfoHash Conn. Get Ops IP" << std::endl;
    unsigned i = 0;
    auto last_get = sr.getLastGetTime();
    for (const auto& n : sr.nodes) {
        i++;
        out << std::setfill (' ') << std::setw(3) << InfoHash::commonBits(sr.id, n.node->id) << ' ' << n.node->id;
        // '*' when the node is also present in our routing table.
        out << ' ' << (findNode(n.node->id, sr.af) ? '*' : ' ');
        out << " [";
        if (auto pendingCount = n.node->getPendingMessageCount())
            out << pendingCount;
        else
            out << ' ';
        out << (n.node->isExpired() ? 'x' : ' ') << "]";

        // Get status: s=synced, u=synced+updated since last get, -=not synced;
        // f=pending get, c=pending on a candidate node.
        {
            char g_i = n.pending(n.getStatus) ? (n.candidate ? 'c' : 'f') : ' ';
            char s_i = n.isSynced(now) ? (n.last_get_reply > last_get ? 'u' : 's') : '-';
            out << " [" << s_i << g_i << "] ";
        }

        // Listen status: l=listening, f=listen pending.
        if (not sr.listeners.empty()) {
            if (n.listenStatus.empty())
                out << " ";
            else
                out << "["
                    << (n.isListening(now) ? 'l' : (n.pending(n.listenStatus) ? 'f' : ' ')) << "] ";
        }

        // Announce status: one state character per announced value.
        if (not sr.announce.empty()) {
            if (n.acked.empty()) {
                out << " ";
                for (size_t a=0; a < sr.announce.size(); a++)
                    out << ' ';
            } else {
                out << "[";
                for (const auto& a : sr.announce) {
                    auto ack = n.acked.find(a.value->id);
                    if (ack == n.acked.end() or not ack->second.first) {
                        out << ' ';
                    } else {
                        out << ack->second.first->getStateChar();
                    }
                }
                out << "] ";
            }
        }
        out << n.node->getAddrStr() << std::endl;
    }
}
+
+void
+Dht::dumpTables() const
+{
+ std::stringstream out;
+ out << "My id " << myid << std::endl;
+
+ out << "Buckets IPv4 :" << std::endl;
+ for (const auto& b : buckets4)
+ dumpBucket(b, out);
+ out << "Buckets IPv6 :" << std::endl;
+ for (const auto& b : buckets6)
+ dumpBucket(b, out);
+
+ auto dump_searches = [&](std::map<InfoHash, Sp<Search>> srs) {
+ for (auto& srp : srs)
+ dumpSearch(*srp.second, out);
+ };
+ dump_searches(searches4);
+ dump_searches(searches6);
+ out << std::endl;
+
+ out << getStorageLog() << std::endl;
+
+ DHT_LOG.d("%s", out.str().c_str());
+}
+
+std::string
+Dht::getStorageLog() const
+{
+ std::stringstream out;
+ for (const auto& s : store)
+ out << printStorageLog(s);
+ out << std::endl << std::endl;
+ std::multimap<size_t, const SockAddr*> q_map;
+ for (const auto& ip : store_quota)
+ if (ip.second.size())
+ q_map.emplace(ip.second.size(), &ip.first);
+ for (auto ip = q_map.rbegin(); ip != q_map.rend(); ++ip)
+ out << "IP " << ip->second->toString() << " uses " << ip->first << " bytes" << std::endl;
+ out << std::endl;
+ out << "Total " << store.size() << " storages, " << total_values << " values (";
+ if (total_store_size < 1024)
+ out << total_store_size << " bytes)";
+ else
+ out << (total_store_size/1024) << " / " << (max_store_size/1024) << " KB)";
+ out << std::endl;
+ return out.str();
+}
+
+std::string
+Dht::getStorageLog(const InfoHash& h) const
+{
+ auto s = store.find(h);
+ if (s == store.end()) {
+ std::stringstream out;
+ out << "Storage " << h << " empty" << std::endl;
+ return out.str();
+ }
+ return printStorageLog(*s);
+}
+
+std::string
+Dht::printStorageLog(const decltype(store)::value_type& s) const
+{
+ std::stringstream out;
+ using namespace std::chrono;
+ const auto& st = s.second;
+ out << "Storage " << s.first << " "
+ << st.listeners.size() << " list., "
+ << st.valueCount() << " values ("
+ << st.totalSize() << " bytes)" << std::endl;
+ if (not st.local_listeners.empty())
+ out << " " << st.local_listeners.size() << " local listeners" << std::endl;
+ for (const auto& node_listeners : st.listeners) {
+ const auto& node = node_listeners.first;
+ out << " " << "Listener " << node->toString() << " : " << node_listeners.second.size() << " entries" << std::endl;
+ }
+ return out.str();
+}
+
+std::string
+Dht::getRoutingTablesLog(sa_family_t af) const
+{
+ std::stringstream out;
+ for (const auto& b : buckets(af))
+ dumpBucket(b, out);
+ return out.str();
+}
+
+std::string
+Dht::getSearchesLog(sa_family_t af) const
+{
+ std::stringstream out;
+ auto num_searches = searches4.size() + searches6.size();
+ if (num_searches > 8) {
+ if (not af or af == AF_INET)
+ for (const auto& sr : searches4)
+ out << "[search " << sr.first << " IPv4]" << std::endl;
+ if (not af or af == AF_INET6)
+ for (const auto& sr : searches6)
+ out << "[search " << sr.first << " IPv6]" << std::endl;
+ } else {
+ out << "s:synched, u:updated, a:announced, c:candidate, f:cur req, x:expired, *:known" << std::endl;
+ if (not af or af == AF_INET)
+ for (const auto& sr : searches4)
+ dumpSearch(*sr.second, out);
+ if (not af or af == AF_INET6)
+ for (const auto& sr : searches6)
+ dumpSearch(*sr.second, out);
+ }
+ out << "Total: " << num_searches << " searches (" << searches4.size() << " IPv4, " << searches6.size() << " IPv6)." << std::endl;
+ return out.str();
+}
+
+std::string
+Dht::getSearchLog(const InfoHash& id, sa_family_t af) const
+{
+ std::stringstream out;
+ if (af == AF_UNSPEC) {
+ out << getSearchLog(id, AF_INET) << getSearchLog(id, AF_INET6);
+ } else {
+ auto& srs = searches(af);
+ auto sr = srs.find(id);
+ if (sr != srs.end())
+ dumpSearch(*sr->second, out);
+ }
+ return out.str();
+}
+
+Dht::~Dht()
+{
+ for (auto& s : searches4)
+ s.second->clear();
+ for (auto& s : searches6)
+ s.second->clear();
+}
+
/* Inert DHT instance: no sockets, no routing tables populated. */
Dht::Dht() : store(), network_engine(DHT_LOG, scheduler) {}
+
/* Live node bound to an IPv4 socket 's' and/or IPv6 socket 's6';
 * a negative fd disables the corresponding family. Seeds the token
 * secret and schedules the first maintenance jobs. */
Dht::Dht(const int& s, const int& s6, Config config)
    : myid(config.node_id ? config.node_id : InfoHash::getRandom()),
    is_bootstrap(config.is_bootstrap),
    maintain_storage(config.maintain_storage), store(), store_quota(),
    network_engine(myid, config.network, s, s6, DHT_LOG, scheduler,
        std::bind(&Dht::onError, this, _1, _2),
        std::bind(&Dht::onNewNode, this, _1, _2),
        std::bind(&Dht::onReportedAddr, this, _1, _2),
        std::bind(&Dht::onPing, this, _1),
        std::bind(&Dht::onFindNode, this, _1, _2, _3),
        std::bind(&Dht::onGetValues, this, _1, _2, _3, _4),
        std::bind(&Dht::onListen, this, _1, _2, _3, _4, _5),
        std::bind(&Dht::onAnnounce, this, _1, _2, _3, _4, _5),
        std::bind(&Dht::onRefresh, this, _1, _2, _3, _4))
{
    scheduler.syncTime();
    /* No socket at all: stay inert (no buckets, no scheduled jobs). */
    if (s < 0 && s6 < 0)
        return;
    if (s >= 0) {
        buckets4 = {Bucket {AF_INET}};
        buckets4.is_client = config.is_bootstrap;
    }
    if (s6 >= 0) {
        buckets6 = {Bucket {AF_INET6}};
        buckets6.is_client = config.is_bootstrap;
    }

    search_id = std::uniform_int_distribution<decltype(search_id)>{}(rd);

    /* First node confirmation pass happens in 3-5 seconds. */
    uniform_duration_distribution<> time_dis {std::chrono::seconds(3), std::chrono::seconds(5)};
    auto confirm_nodes_time = scheduler.time() + time_dis(rd);
    DHT_LOG.d(myid, "Scheduling %s", myid.toString().c_str());
    nextNodesConfirmation = scheduler.add(confirm_nodes_time, std::bind(&Dht::confirmNodes, this));

    // Fill old secret
    {
        /* Token secrets come from a cryptographic RNG, not 'rd'. */
        crypto::random_device rdev;
        secret = std::uniform_int_distribution<uint64_t>{}(rdev);
    }
    rotateSecrets();

    expire();

    DHT_LOG.d("DHT initialised with node ID %s", myid.toString().c_str());
}
+
+
/* Keep our immediate neighbourhood fresh: send a find_node for a
 * randomized near-own id to a random node from our own bucket (or,
 * occasionally, an adjacent one). Returns false when no bucket for
 * our own id exists yet. */
bool
Dht::neighbourhoodMaintenance(RoutingTable& list)
{
    //DHT_LOG_DBG("neighbourhoodMaintenance");
    auto b = list.findBucket(myid);
    if (b == list.end())
        return false;

    /* Target: our own id with a randomized last byte, i.e. a very close
       neighbour. */
    InfoHash id = myid;
#ifdef _WIN32
    /* NOTE(review): presumably uniform_int_distribution<uint8_t> is not
       accepted by MSVC's standard library — confirm. */
    std::uniform_int_distribution<int> rand_byte{ 0, std::numeric_limits<uint8_t>::max() };
#else
    std::uniform_int_distribution<uint8_t> rand_byte;
#endif
    id[HASH_LEN-1] = rand_byte(rd);

    /* Usually query our own bucket, but when it is empty (or with 1/8
       probability) prefer the next or previous bucket instead. */
    std::bernoulli_distribution rand_trial(1./8.);
    auto q = b;
    if (std::next(q) != list.end() && (q->nodes.empty() || rand_trial(rd)))
        q = std::next(q);
    if (b != list.begin() && (q->nodes.empty() || rand_trial(rd))) {
        auto r = std::prev(b);
        if (!r->nodes.empty())
            q = r;
    }

    auto n = q->randomNode();
    if (n) {
        DHT_LOG.d(id, n->id, "[node %s] sending [find %s] for neighborhood maintenance",
                n->toString().c_str(), id.toString().c_str());
        /* Since our node-id is the same in both DHTs, it's probably
           profitable to query both families. */
        network_engine.sendFindNode(n, id, network_engine.want());
    }

    return true;
}
+
/* Refresh stale buckets: for each bucket quiet for over 10 minutes (or
 * empty), pick a random id in its range and send a find_node to a random
 * node. Returns true when at least one request was sent. */
bool
Dht::bucketMaintenance(RoutingTable& list)
{
    std::bernoulli_distribution rand_trial(1./8.);
    std::bernoulli_distribution rand_trial_38(1./38.);

    bool sent {false};
    for (auto b = list.begin(); b != list.end(); ++b) {
        if (b->time < scheduler.time() - std::chrono::minutes(10) || b->nodes.empty()) {
            /* This bucket hasn't seen any positive confirmation for a long
               time. Pick a random id in this bucket's range, and send a
               request to a random node. */
            InfoHash id = list.randomId(b);
            auto q = b;
            /* If the bucket is empty, we try to fill it from a neighbour.
               We also sometimes do it gratuitiously to recover from
               buckets full of broken nodes. */
            if (std::next(b) != list.end() && (q->nodes.empty() || rand_trial(rd)))
                q = std::next(b);
            if (b != list.begin() && (q->nodes.empty() || rand_trial(rd))) {
                auto r = std::prev(b);
                if (!r->nodes.empty())
                    q = r;
            }

            auto n = q->randomNode();
            if (n and not n->isPendingMessage()) {
                /* NOTE(review): want_t(-1) appears to mean "unspecified
                   want" here (compared against network_engine.want()) —
                   confirm against NetworkEngine. */
                want_t want = -1;

                if (network_engine.want() != want) {
                    auto otherbucket = findBucket(id, q->af == AF_INET ? AF_INET6 : AF_INET);
                    if (otherbucket && otherbucket->nodes.size() < TARGET_NODES)
                        /* The corresponding bucket in the other family
                           is emptyish -- querying both is useful. */
                        want = WANT4 | WANT6;
                    else if (rand_trial_38(rd))
                        /* Most of the time, this just adds overhead.
                           However, it might help stitch back one of
                           the DHTs after a network collapse, so query
                           both, but only very occasionally. */
                        want = WANT4 | WANT6;
                }

                DHT_LOG.d(id, n->id, "[node %s] sending find %s for bucket maintenance", n->toString().c_str(), id.toString().c_str());
                auto start = scheduler.time();
                network_engine.sendFindNode(n, id, want, nullptr, [this,start,n](const net::Request&, bool over) {
                    if (over) {
                        /* The request expired unanswered: bring the next
                           node-confirmation pass forward. */
                        const auto& end = scheduler.time();
                        using namespace std::chrono;
                        DHT_LOG.d(n->id, "[node %s] bucket maintenance op expired after %llu ms", n->toString().c_str(), duration_cast<milliseconds>(end-start).count());
                        scheduler.edit(nextNodesConfirmation, end + Node::MAX_RESPONSE_TIME);
                    }
                });
                sent = true;
            }
        }
    }
    return sent;
}
+
+void
+Dht::dataPersistence(InfoHash id)
+{
+ const auto& now = scheduler.time();
+ auto str = store.find(id);
+ if (str != store.end() and now > str->second.maintenance_time) {
+ DHT_LOG.d(id, "[storage %s] maintenance (%u values, %u bytes)",
+ id.toString().c_str(), str->second.valueCount(), str->second.totalSize());
+ maintainStorage(*str);
+ str->second.maintenance_time = now + MAX_STORAGE_MAINTENANCE_EXPIRE_TIME;
+ scheduler.add(str->second.maintenance_time, std::bind(&Dht::dataPersistence, this, id));
+ }
+}
+
+size_t
+Dht::maintainStorage(decltype(store)::value_type& storage, bool force, DoneCallback donecb)
+{
+ const auto& now = scheduler.time();
+ size_t announce_per_af = 0;
+
+ bool want4 = true, want6 = true;
+
+ auto nodes = buckets4.findClosestNodes(storage.first, now);
+ if (!nodes.empty()) {
+ if (force || storage.first.xorCmp(nodes.back()->id, myid) < 0) {
+ for (auto &value : storage.second.getValues()) {
+ const auto& vt = getType(value.data->type);
+ if (force || value.created + vt.expiration > now + MAX_STORAGE_MAINTENANCE_EXPIRE_TIME) {
+ // gotta put that value there
+ announce(storage.first, AF_INET, value.data, donecb, value.created);
+ ++announce_per_af;
+ }
+ }
+ want4 = false;
+ }
+ }
+
+ auto nodes6 = buckets6.findClosestNodes(storage.first, now);
+ if (!nodes6.empty()) {
+ if (force || storage.first.xorCmp(nodes6.back()->id, myid) < 0) {
+ for (auto &value : storage.second.getValues()) {
+ const auto& vt = getType(value.data->type);
+ if (force || value.created + vt.expiration > now + MAX_STORAGE_MAINTENANCE_EXPIRE_TIME) {
+ // gotta put that value there
+ announce(storage.first, AF_INET6, value.data, donecb, value.created);
+ ++announce_per_af;
+ }
+ }
+ want6 = false;
+ }
+ }
+
+ if (not want4 and not want6) {
+ DHT_LOG.d(storage.first, "Discarding storage values %s", storage.first.toString().c_str());
+ auto diff = storage.second.clear();
+ total_store_size += diff.size_diff;
+ total_values += diff.values_diff;
+ }
+
+ return announce_per_af;
+}
+
+time_point
+Dht::periodic(const uint8_t *buf, size_t buflen, const SockAddr& from)
+{
+ scheduler.syncTime();
+ if (buflen) {
+ try {
+ network_engine.processMessage(buf, buflen, from);
+ } catch (const std::exception& e) {
+ DHT_LOG.e("Can't process message from %s: %s", from.toString().c_str(), e.what());
+ }
+ }
+ return scheduler.run();
+}
+
+void
+Dht::expire()
+{
+ uniform_duration_distribution<> time_dis(std::chrono::minutes(2), std::chrono::minutes(6));
+ auto expire_stuff_time = scheduler.time() + duration(time_dis(rd));
+
+ expireBuckets(buckets4);
+ expireBuckets(buckets6);
+ expireStore();
+ expireSearches();
+ scheduler.add(expire_stuff_time, std::bind(&Dht::expire, this));
+}
+
/* Periodic job: bootstrap an initial search for our own id when a
 * family becomes connected, then run bucket and neighbourhood
 * maintenance. Re-schedules itself sooner while maintenance is active. */
void
Dht::confirmNodes()
{
    using namespace std::chrono;
    bool soon = false;
    const auto& now = scheduler.time();

    if (searches4.empty() and getStatus(AF_INET) == NodeStatus::Connected) {
        /* No IPv4 search yet: look up our own id to fill the
           neighbourhood. */
        DHT_LOG.d(myid, "[confirm nodes] initial IPv4 'get' for my id (%s)", myid.toString().c_str());
        search(myid, AF_INET);
    }
    if (searches6.empty() and getStatus(AF_INET6) == NodeStatus::Connected) {
        DHT_LOG.d(myid, "[confirm nodes] initial IPv6 'get' for my id (%s)", myid.toString().c_str());
        search(myid, AF_INET6);
    }

    soon |= bucketMaintenance(buckets4);
    soon |= bucketMaintenance(buckets6);

    if (!soon) {
        /* Only do neighbourhood maintenance on recently-grown tables. */
        if (buckets4.grow_time >= now - seconds(150))
            soon |= neighbourhoodMaintenance(buckets4);
        if (buckets6.grow_time >= now - seconds(150))
            soon |= neighbourhoodMaintenance(buckets6);
    }

    /* In order to maintain all buckets' age within 600 seconds, worst
       case is roughly 27 seconds, assuming the table is 22 bits deep.
       We want to keep a margin for neighborhood maintenance, so keep
       this within 25 seconds. */
    auto time_dis = soon
        ? uniform_duration_distribution<> {seconds(5) , seconds(25)}
        : uniform_duration_distribution<> {seconds(60), seconds(180)};
    auto confirm_nodes_time = now + time_dis(rd);

    scheduler.edit(nextNodesConfirmation, confirm_nodes_time);
}
+
+std::vector<ValuesExport>
+Dht::exportValues() const
+{
+ std::vector<ValuesExport> e {};
+ e.reserve(store.size());
+ for (const auto& h : store) {
+ ValuesExport ve;
+ ve.first = h.first;
+
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ const auto& vals = h.second.getValues();
+ pk.pack_array(vals.size());
+ for (const auto& v : vals) {
+ pk.pack_array(2);
+ pk.pack(v.created.time_since_epoch().count());
+ v.data->msgpack_pack(pk);
+ }
+ ve.second = {buffer.data(), buffer.data()+buffer.size()};
+ e.push_back(std::move(ve));
+ }
+ return e;
+}
+
/* Re-load values previously produced by exportValues().
 * Malformed blobs or entries are logged and skipped; creation times in
 * the future are clamped to the current time. */
void
Dht::importValues(const std::vector<ValuesExport>& import)
{
    for (const auto& h : import) {
        if (h.second.empty())
            continue;

        const auto& now = scheduler.time();
        try {
            msgpack::unpacked msg;
            msgpack::unpack(msg, (const char*)h.second.data(), h.second.size());
            auto valarr = msg.get();
            if (valarr.type != msgpack::type::ARRAY)
                throw msgpack::type_error();
            for (unsigned i = 0; i < valarr.via.array.size; i++) {
                /* Each element is a [creation time, value] pair
                   (see exportValues). */
                auto& valel = valarr.via.array.ptr[i];
                if (valel.type != msgpack::type::ARRAY or valel.via.array.size < 2)
                    throw msgpack::type_error();
                time_point val_time;
                Value tmp_val;
                try {
                    val_time = time_point{time_point::duration{valel.via.array.ptr[0].as<time_point::duration::rep>()}};
                    tmp_val.msgpack_unpack(valel.via.array.ptr[1]);
                } catch (const std::exception&) {
                    /* Skip just this value, keep importing the rest. */
                    DHT_LOG.e(h.first, "Error reading value at %s", h.first.toString().c_str());
                    continue;
                }
                val_time = std::min(val_time, now);
                storageStore(h.first, std::make_shared<Value>(std::move(tmp_val)), val_time);
            }
        } catch (const std::exception&) {
            /* The whole blob for this hash is unreadable. */
            DHT_LOG.e(h.first, "Error reading values at %s", h.first.toString().c_str());
            continue;
        }
    }
}
+
+
+std::vector<NodeExport>
+Dht::exportNodes()
+{
+ const auto& now = scheduler.time();
+ std::vector<NodeExport> nodes;
+ const auto b4 = buckets4.findBucket(myid);
+ if (b4 != buckets4.end()) {
+ for (auto& n : b4->nodes)
+ if (n->isGood(now))
+ nodes.push_back(n->exportNode());
+ }
+ const auto b6 = buckets6.findBucket(myid);
+ if (b6 != buckets6.end()) {
+ for (auto& n : b6->nodes)
+ if (n->isGood(now))
+ nodes.push_back(n->exportNode());
+ }
+ for (auto b = buckets4.begin(); b != buckets4.end(); ++b) {
+ if (b == b4) continue;
+ for (auto& n : b->nodes)
+ if (n->isGood(now))
+ nodes.push_back(n->exportNode());
+ }
+ for (auto b = buckets6.begin(); b != buckets6.end(); ++b) {
+ if (b == b6) continue;
+ for (auto& n : b->nodes)
+ if (n->isGood(now))
+ nodes.push_back(n->exportNode());
+ }
+ return nodes;
+}
+
+void
+Dht::insertNode(const InfoHash& id, const SockAddr& addr)
+{
+ if (addr.getFamily() != AF_INET && addr.getFamily() != AF_INET6)
+ return;
+ scheduler.syncTime();
+ network_engine.insertNode(id, addr);
+}
+
/* Send a single ping to the given raw address.
 * cb (optional) receives true on reply, false on expiration. The
 * per-family pending ping counter is incremented now and decremented
 * from the completion callbacks. */
void
Dht::pingNode(const sockaddr* sa, socklen_t salen, DoneCallbackSimple&& cb)
{
    scheduler.syncTime();
    DHT_LOG.d("Sending ping to %s", print_addr(sa, salen).c_str());
    /* NOTE(review): 'count' is captured by reference into async
       callbacks; safe only while this Dht outlives the request. */
    auto& count = sa->sa_family == AF_INET ? pending_pings4 : pending_pings6;
    count++;
    network_engine.sendPing(sa, salen, [&count,cb](const net::Request&, net::RequestAnswer&&) {
        /* Got a pong. */
        count--;
        if (cb)
            cb(true);
    }, [&count,cb](const net::Request&, bool last){
        if (last) {
            /* Request expired with no answer. */
            count--;
            if (cb)
                cb(false);
        }
    });
}
+
/* Handle a protocol error reply from a node.
 * UNAUTHORIZED: our write token was stale — flush cached tokens for that
 * node in every search of its family and retry 'get' immediately.
 * NOT_FOUND: the node has no such storage — just cancel the request. */
void
Dht::onError(Sp<net::Request> req, net::DhtProtocolException e) {
    const auto& node = req->node;
    if (e.getCode() == net::DhtProtocolException::UNAUTHORIZED) {
        DHT_LOG.e(node->id, "[node %s] token flush", node->toString().c_str());
        node->authError();
        node->cancelRequest(req);
        for (auto& srp : searches(node->getFamily())) {
            auto& sr = srp.second;
            for (auto& n : sr->nodes) {
                if (n.node != node) continue;
                /* Forget the stale token and force a fresh 'get' round. */
                n.token.clear();
                n.last_get_reply = time_point::min();
                searchSendGetValues(sr);
                scheduler.edit(sr->nextSearchStep, scheduler.time());
                break;
            }
        }
    } else if (e.getCode() == net::DhtProtocolException::NOT_FOUND) {
        DHT_LOG.e(node->id, "[node %s] returned error 404: storage not found", node->toString().c_str());
        node->cancelRequest(req);
    }
}
+
+void
+Dht::onReportedAddr(const InfoHash& /*id*/, const SockAddr& addr)
+{
+ if (addr)
+ reportedAddr(addr);
+}
+
/* Answer a ping: an empty answer is all the protocol requires. */
net::RequestAnswer
Dht::onPing(Sp<Node>)
{
    return {};
}
+
/* Answer a 'find_node' query: return a fresh write token plus the
 * closest known nodes to 'target' for each requested family. */
net::RequestAnswer
Dht::onFindNode(Sp<Node> node, const InfoHash& target, want_t want)
{
    const auto& now = scheduler.time();
    net::RequestAnswer answer;
    answer.ntoken = makeToken(node->getAddr(), false);
    if (want & WANT4)
        answer.nodes4 = buckets4.findClosestNodes(target, now, TARGET_NODES);
    if (want & WANT6)
        answer.nodes6 = buckets6.findClosestNodes(target, now, TARGET_NODES);
    return answer;
}
+
+net::RequestAnswer
+Dht::onGetValues(Sp<Node> node, const InfoHash& hash, want_t, const Query& query)
+{
+ if (not hash) {
+ DHT_LOG.w("[node %s] Eek! Got get_values with no info_hash", node->toString().c_str());
+ throw net::DhtProtocolException {
+ net::DhtProtocolException::NON_AUTHORITATIVE_INFORMATION,
+ net::DhtProtocolException::GET_NO_INFOHASH
+ };
+ }
+ const auto& now = scheduler.time();
+ net::RequestAnswer answer {};
+ auto st = store.find(hash);
+ answer.ntoken = makeToken(node->getAddr(), false);
+ answer.nodes4 = buckets4.findClosestNodes(hash, now, TARGET_NODES);
+ answer.nodes6 = buckets6.findClosestNodes(hash, now, TARGET_NODES);
+ if (st != store.end() && not st->second.empty()) {
+ answer.values = st->second.get(query.where.getFilter());
+ DHT_LOG.d(hash, "[node %s] sending %u values", node->toString().c_str(), answer.values.size());
+ }
+ return answer;
+}
+
/* Process a reply to one of our 'get' requests: dispatch the received
 * values/fields to every matching callback of the search, then keep the
 * search moving. Replies without a token are ignored and the sender is
 * blacklisted. */
void Dht::onGetValuesDone(const Sp<Node>& node,
        net::RequestAnswer& a,
        Sp<Search>& sr,
        const Sp<Query>& orig_query)
{
    if (not sr) {
        DHT_LOG.w("[search unknown] got reply to 'get'. Ignoring.");
        return;
    }

    /*DHT_LOG.d(sr->id, "[search %s] [node %s] got reply to 'get' with %u nodes",
            sr->id.toString().c_str(), node->toString().c_str(), a.nodes4.size()+a.nodes6.size());*/

    if (not a.ntoken.empty()) {
        if (not a.values.empty() or not a.fields.empty()) {
            DHT_LOG.d(sr->id, node->id, "[search %s] [node %s] found %u values",
                    sr->id.toString().c_str(), node->toString().c_str(), a.values.size());
            for (auto& getp : sr->callbacks) { /* call all callbacks for this search */
                auto& get = getp.second;
                /* Skip callbacks with no handler, or whose query this
                   reply does not satisfy. */
                if (not (get.get_cb or get.query_cb) or
                        (orig_query and get.query and not get.query->isSatisfiedBy(*orig_query)))
                    continue;

                if (get.query_cb) { /* in case of a request with query */
                    if (not a.fields.empty()) {
                        get.query_cb(a.fields);
                    } else if (not a.values.empty()) {
                        /* Full values came back: project them onto the
                           query's selection before invoking the callback. */
                        std::vector<Sp<FieldValueIndex>> fields;
                        fields.reserve(a.values.size());
                        for (const auto& v : a.values)
                            fields.emplace_back(std::make_shared<FieldValueIndex>(*v, orig_query ? orig_query->select : Select {}));
                        get.query_cb(fields);
                    }
                } else if (get.get_cb) { /* in case of a vanilla get request */
                    std::vector<Sp<Value>> tmp;
                    for (const auto& v : a.values)
                        if (not get.filter or get.filter(*v))
                            tmp.emplace_back(v);
                    if (not tmp.empty())
                        get.get_cb(tmp);
                }
            }

            /* callbacks for local search listeners */
            /*std::vector<std::pair<ValueCallback, std::vector<Sp<Value>>>> tmp_lists;
            for (auto& l : sr->listeners) {
                if (!l.second.get_cb or (orig_query and l.second.query and not l.second.query->isSatisfiedBy(*orig_query)))
                    continue;
                std::vector<Sp<Value>> tmp;
                for (const auto& v : a.values)
                    if (not l.second.filter or l.second.filter(*v))
                        tmp.emplace_back(v);
                if (not tmp.empty())
                    tmp_lists.emplace_back(l.second.get_cb, std::move(tmp));
            }
            for (auto& l : tmp_lists)
                l.first(l.second, false);*/
        } else if (not a.expired_values.empty()) {
            DHT_LOG.w(sr->id, node->id, "[search %s] [node %s] %u expired values",
                    sr->id.toString().c_str(), node->toString().c_str(), a.expired_values.size());
        }
    } else {
        /* A reply without a token is suspicious: ignore it and
           blacklist the sender. */
        DHT_LOG.w(sr->id, "[node %s] no token provided. Ignoring response content.", node->toString().c_str());
        network_engine.blacklistNode(node);
    }

    if (not sr->done) {
        searchSendGetValues(sr);

        // Force to recompute the next step time
        scheduler.edit(sr->nextSearchStep, scheduler.time());
    }
}
+
+net::RequestAnswer
+Dht::onListen(Sp<Node> node, const InfoHash& hash, const Blob& token, size_t socket_id, const Query& query)
+{
+ if (not hash) {
+ DHT_LOG.w(node->id, "[node %s] listen with no info_hash", node->toString().c_str());
+ throw net::DhtProtocolException {
+ net::DhtProtocolException::NON_AUTHORITATIVE_INFORMATION,
+ net::DhtProtocolException::LISTEN_NO_INFOHASH
+ };
+ }
+ if (not tokenMatch(token, node->getAddr())) {
+ DHT_LOG.w(hash, node->id, "[node %s] incorrect token %s for 'listen'", node->toString().c_str(), hash.toString().c_str());
+ throw net::DhtProtocolException {net::DhtProtocolException::UNAUTHORIZED, net::DhtProtocolException::LISTEN_WRONG_TOKEN};
+ }
+ Query q = query;
+ storageAddListener(hash, node, socket_id, std::move(q));
+ return {};
+}
+
+void
+Dht::onListenDone(const Sp<Node>& node,
+ net::RequestAnswer& answer,
+ Sp<Search>& sr)
+{
+ // DHT_LOG.d(sr->id, node->id, "[search %s] [node %s] got listen confirmation",
+ // sr->id.toString().c_str(), node->toString().c_str(), answer.values.size());
+
+ if (not sr->done) {
+ const auto& now = scheduler.time();
+ searchSendGetValues(sr);
+ scheduler.edit(sr->nextSearchStep, now);
+ }
+}
+
/* Handle an incoming 'put': validate hash and token, refuse values when
 * we are too far from the key, then store/refresh/edit each value
 * according to its type's storage and edit policies. */
net::RequestAnswer
Dht::onAnnounce(Sp<Node> n,
        const InfoHash& hash,
        const Blob& token,
        const std::vector<Sp<Value>>& values,
        const time_point& creation_date)
{
    auto& node = *n;
    if (not hash) {
        DHT_LOG.w(node.id, "put with no info_hash");
        throw net::DhtProtocolException {
            net::DhtProtocolException::NON_AUTHORITATIVE_INFORMATION,
            net::DhtProtocolException::PUT_NO_INFOHASH
        };
    }
    if (!tokenMatch(token, node.getAddr())) {
        DHT_LOG.w(hash, node.id, "[node %s] incorrect token %s for 'put'", node.toString().c_str(), hash.toString().c_str());
        throw net::DhtProtocolException {net::DhtProtocolException::UNAUTHORIZED, net::DhtProtocolException::PUT_WRONG_TOKEN};
    }
    {
        // We store a value only if we think we're part of the
        // SEARCH_NODES nodes around the target id.
        auto closest_nodes = buckets(node.getFamily()).findClosestNodes(hash, scheduler.time(), SEARCH_NODES);
        if (closest_nodes.size() >= TARGET_NODES and hash.xorCmp(closest_nodes.back()->id, myid) < 0) {
            DHT_LOG.w(hash, node.id, "[node %s] announce too far from the target. Dropping value.", node.toString().c_str());
            return {};
        }
    }

    /* Never accept creation times from the future. */
    auto created = std::min(creation_date, scheduler.time());
    for (const auto& v : values) {
        if (v->id == Value::INVALID_ID) {
            DHT_LOG.w(hash, node.id, "[value %s] incorrect value id", hash.toString().c_str());
            throw net::DhtProtocolException {
                net::DhtProtocolException::NON_AUTHORITATIVE_INFORMATION,
                net::DhtProtocolException::PUT_INVALID_ID
            };
        }
        auto lv = getLocalById(hash, v->id);
        Sp<Value> vc = v;
        if (lv) {
            if (*lv == *vc) {
                /* Identical to what we already hold: just extend its
                   lifetime. */
                storageRefresh(hash, v->id);
                DHT_LOG.d(hash, node.id, "[store %s] [node %s] refreshed value %s", hash.toString().c_str(), node.toString().c_str(), std::to_string(v->id).c_str());
            } else {
                /* Same id, different content: the type's edit policy
                   decides whether the sender may replace it. */
                const auto& type = getType(lv->type);
                if (type.editPolicy(hash, lv, vc, node.id, node.getAddr())) {
                    DHT_LOG.d(hash, node.id, "[store %s] editing %s",
                            hash.toString().c_str(), vc->toString().c_str());
                    storageStore(hash, vc, created, node.getAddr());
                } else {
                    DHT_LOG.d(hash, node.id, "[store %s] rejecting edition of %s because of storage policy",
                            hash.toString().c_str(), vc->toString().c_str());
                }
            }
        } else {
            // Allow the value to be edited by the storage policy
            const auto& type = getType(vc->type);
            if (type.storePolicy(hash, vc, node.id, node.getAddr())) {
                //DHT_LOG.d(hash, node.id, "[store %s] storing %s", hash.toString().c_str(), std::to_string(vc->id).c_str());
                storageStore(hash, vc, created, node.getAddr());
            } else {
                DHT_LOG.d(hash, node.id, "[store %s] rejecting storage of %s",
                        hash.toString().c_str(), vc->toString().c_str());
            }
        }
    }
    return {};
}
+
+net::RequestAnswer
+Dht::onRefresh(Sp<Node> node, const InfoHash& hash, const Blob& token, const Value::Id& vid)
+{
+ using namespace net;
+
+ if (not tokenMatch(token, node->getAddr())) {
+ DHT_LOG.w(hash, node->id, "[node %s] incorrect token %s for 'put'", node->toString().c_str(), hash.toString().c_str());
+ throw DhtProtocolException {DhtProtocolException::UNAUTHORIZED, DhtProtocolException::PUT_WRONG_TOKEN};
+ }
+ if (storageRefresh(hash, vid)) {
+ DHT_LOG.d(hash, node->id, "[store %s] [node %s] refreshed value %s", hash.toString().c_str(), node->toString().c_str(), std::to_string(vid).c_str());
+ } else {
+ DHT_LOG.d(hash, node->id, "[store %s] [node %s] got refresh for unknown value",
+ hash.toString().c_str(), node->toString().c_str());
+ throw DhtProtocolException {DhtProtocolException::NOT_FOUND, DhtProtocolException::STORAGE_NOT_FOUND};
+ }
+ return {};
+}
+
+bool
+Dht::storageRefresh(const InfoHash& id, Value::Id vid)
+{
+ const auto& now = scheduler.time();
+ auto s = store.find(id);
+ if (s != store.end()) {
+ auto expiration = s->second.refresh(now, vid, types);
+ if (expiration != time_point::max())
+ scheduler.add(expiration, std::bind(&Dht::expireStorage, this, id));
+ return true;
+ }
+ return false;
+}
+
/* A node acknowledged one of our 'put' requests: record the value as
 * announced on it and keep the search progressing. */
void
Dht::onAnnounceDone(const Sp<Node>& node, net::RequestAnswer& answer, Sp<Search>& sr)
{
    DHT_LOG.d(sr->id, node->id, "[search %s] [node %s] got reply to put!",
            sr->id.toString().c_str(), node->toString().c_str());
    searchSendGetValues(sr);
    sr->checkAnnounced(answer.vid);
}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2016-2018 Savoir-faire Linux Inc.
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ * Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#if OPENDHT_PROXY_CLIENT
+
+#include "dht_proxy_client.h"
+
+#include "dhtrunner.h"
+#include "op_cache.h"
+#include "utils.h"
+
+#include <restbed>
+#include <json/json.h>
+
+#include <chrono>
+#include <vector>
+
+namespace dht {
+
/* Shared state for the proxy-info request; 'cancel' aborts it.
 * NOTE(review): ipv4/ipv6 presumably count in-flight info requests per
 * family — confirm against the status-thread implementation. */
struct DhtProxyClient::InfoState {
    std::atomic_uint ipv4 {0}, ipv6 {0};
    std::atomic_bool cancel {false};  // set to stop the status update
};
+
/* Flags shared with a listen worker thread. */
struct DhtProxyClient::ListenState {
    std::atomic_bool ok {true};       // presumably cleared on stream failure — confirm in listen()
    std::atomic_bool cancel {false};  // set to request termination
};
+
/* State of a single proxy 'listen' operation. */
struct DhtProxyClient::Listener
{
    ValueCache cache;                       // values received so far for this listen
    Sp<Scheduler::Job> cacheExpirationJob {};
    ValueCallback cb;
    Value::Filter filter;
    Sp<restbed::Request> req;               // the HTTP request driving the listen
    std::thread thread;                     // worker thread running the request
    unsigned callbackId;
    Sp<ListenState> state;                  // shared cancel/ok flags (see ListenState)
    Sp<proxy::ListenToken> pushNotifToken;  // NOTE(review): presumably set when push notifications are used — confirm
    Sp<Scheduler::Job> refreshJob;
    Listener(ValueCache&& c, Sp<Scheduler::Job>&& j, const Sp<restbed::Request>& r, Value::Filter&& f)
        : cache(std::move(c)),
        cacheExpirationJob(std::move(j)),
        filter(std::move(f)),
        req(r)
    {}
};
+
/* A value kept alive on the proxy through periodic re-publication. */
struct PermanentPut {
    Sp<Value> value;
    Sp<Scheduler::Job> refreshJob;  // scheduled re-publication job
    Sp<std::atomic_bool> ok;        // result flag shared with the refresh job
};
+
/* Per-key client state: cached operation results, active listeners and
 * permanent puts. */
struct DhtProxyClient::ProxySearch {
    SearchCache ops {};
    Sp<Scheduler::Job> opExpirationJob;
    std::map<size_t, Listener> listeners {};   // keyed by listen token
    std::map<Value::Id, PermanentPut> puts {};
};
+
/* Inactive client: no proxy server configured. */
DhtProxyClient::DhtProxyClient() {}
+
/* Create a client for the given proxy server and start it when a host
 * is provided. 'signal' wakes up the main loop when callbacks are
 * queued; 'pushClientId' is presumably the push-notification client
 * identifier — confirm against the proxy server API. */
DhtProxyClient::DhtProxyClient(std::function<void()> signal, const std::string& serverHost, const std::string& pushClientId)
: serverHost_(serverHost), pushClientId_(pushClientId), loopSignal_(signal)
{
    if (!serverHost_.empty())
        startProxy();
}
+
+void
+DhtProxyClient::confirmProxy()
+{
+ if (serverHost_.empty()) return;
+ getConnectivityStatus();
+}
+
+void
+DhtProxyClient::startProxy()
+{
+ if (serverHost_.empty()) return;
+ DHT_LOG.w("Staring proxy client to %s", serverHost_.c_str());
+ nextProxyConfirmation = scheduler.add(scheduler.time(), std::bind(&DhtProxyClient::confirmProxy, this));
+ listenerRestart = std::make_shared<Scheduler::Job>(std::bind(&DhtProxyClient::restartListeners, this));
+ loopSignal_();
+}
+
/* Stop all network activity and join worker threads before teardown. */
DhtProxyClient::~DhtProxyClient()
{
    isDestroying_ = true;
    cancelAllOperations();
    cancelAllListeners();
    if (infoState_)
        infoState_->cancel = true;  // ask the status update to stop
    if (statusThread_.joinable())
        statusThread_.join();
}
+
+std::vector<Sp<Value>>
+DhtProxyClient::getLocal(const InfoHash& k, Value::Filter filter) const {
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto s = searches_.find(k);
+ if (s == searches_.end())
+ return {};
+ return s->second.ops.get(filter);
+}
+
+Sp<Value>
+DhtProxyClient::getLocalById(const InfoHash& k, Value::Id id) const {
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto s = searches_.find(k);
+ if (s == searches_.end())
+ return {};
+ return s->second.ops.get(id);
+}
+
+void
+DhtProxyClient::cancelAllOperations()
+{
+ std::lock_guard<std::mutex> lock(lockOperations_);
+ auto operation = operations_.begin();
+ while (operation != operations_.end()) {
+ if (operation->thread.joinable()) {
+ // Close connection to stop operation?
+ restbed::Http::close(operation->req);
+ operation->thread.join();
+ operation = operations_.erase(operation);
+ } else {
+ ++operation;
+ }
+ }
+}
+
/* Stop every running listen operation across all searches: set its
 * cancel flag, close its HTTP request, join the worker thread and drop
 * the listener entry. */
void
DhtProxyClient::cancelAllListeners()
{
    std::lock_guard<std::mutex> lock(searchLock_);
    DHT_LOG.w("Cancelling all listeners for %zu searches", searches_.size());
    for (auto& s: searches_) {
        s.second.ops.cancelAll([&](size_t token){
            auto l = s.second.listeners.find(token);
            if (l == s.second.listeners.end())
                return;
            if (l->second.thread.joinable()) {
                // Close connection to stop listener?
                l->second.state->cancel = true;
                if (l->second.req)
                    restbed::Http::close(l->second.req);
                l->second.thread.join();
            }
            s.second.listeners.erase(token);
        });
    }
}
+
/* Gracefully stop the client: cancel all operations and listeners,
 * then invoke the optional completion callback. */
void
DhtProxyClient::shutdown(ShutdownCallback cb)
{
    cancelAllOperations();
    cancelAllListeners();
    if (cb)
        cb();
}
+
+NodeStatus
+DhtProxyClient::getStatus(sa_family_t af) const
+{
+ std::lock_guard<std::mutex> l(lockCurrentProxyInfos_);
+ switch (af)
+ {
+ case AF_INET:
+ return statusIpv4_;
+ case AF_INET6:
+ return statusIpv6_;
+ default:
+ return NodeStatus::Disconnected;
+ }
+}
+
+bool
+DhtProxyClient::isRunning(sa_family_t af) const
+{
+ std::lock_guard<std::mutex> l(lockCurrentProxyInfos_);
+ switch (af)
+ {
+ case AF_INET:
+ return statusIpv4_ != NodeStatus::Disconnected;
+ case AF_INET6:
+ return statusIpv6_ != NodeStatus::Disconnected;
+ default:
+ return false;
+ }
+}
+
+/**
+ * Periodic maintenance, run on the DHT loop thread: drain the queued
+ * callbacks posted by network threads, reap finished operation threads,
+ * then run due scheduler jobs. Returns the next scheduler wake-up time.
+ */
+time_point
+DhtProxyClient::periodic(const uint8_t*, size_t, const SockAddr&)
+{
+    scheduler.syncTime();
+    // BUGFIX: callbacks_.empty() was previously tested *before* taking
+    // lockCallbacks, racing with producer threads that emplace under the
+    // lock. Swap the queue out under the lock, then run the callbacks
+    // unlocked so a callback may safely enqueue further work.
+    decltype(callbacks_) callbacks;
+    {
+        std::lock_guard<std::mutex> lock(lockCallbacks);
+        callbacks = std::move(callbacks_);
+        callbacks_.clear();
+    }
+    for (auto& callback : callbacks)
+        callback();
+    callbacks.clear();
+    // Remove finished operations: join their threads (closing the HTTP
+    // connection first to unblock them) and drop them from the list.
+    {
+        std::lock_guard<std::mutex> lock(lockOperations_);
+        auto operation = operations_.begin();
+        while (operation != operations_.end()) {
+            if (*(operation->finished)) {
+                if (operation->thread.joinable()) {
+                    // Close connection to stop operation?
+                    restbed::Http::close(operation->req);
+                    operation->thread.join();
+                }
+                operation = operations_.erase(operation);
+            } else {
+                ++operation;
+            }
+        }
+    }
+    return scheduler.run();
+}
+
+/**
+ * Asynchronous get: opens a streaming HTTP request to the proxy and reads
+ * line-delimited JSON values until the server closes, the operation is
+ * finished, or the value callback asks to stop. Each parsed value is
+ * marshalled to the DHT loop thread through callbacks_ (drained by
+ * periodic()); `donecb` is queued the same way when the stream ends.
+ */
+void
+DhtProxyClient::get(const InfoHash& key, GetCallback cb, DoneCallback donecb, Value::Filter&& f, Where&& w)
+{
+ DHT_LOG.d(key, "[search %s]: get", key.to_c_str());
+ restbed::Uri uri(proxy::HTTP_PROTO + serverHost_ + "/" + key.toString());
+ auto req = std::make_shared<restbed::Request>(uri);
+ // Combine the value filter with the Where clause, if any.
+ Value::Filter filter = w.empty() ? f : f.chain(w.getFilter());
+
+ auto finished = std::make_shared<std::atomic_bool>(false);
+ Operation o;
+ o.req = req;
+ o.finished = finished;
+ o.thread = std::thread([=](){
+ // Try to contact the proxy and set the status to connected when done.
+ // will change the connectivity status
+ // ok: request succeeded so far; stop: the value callback returned false.
+ struct GetState{ std::atomic_bool ok {true}; std::atomic_bool stop {false}; };
+ auto state = std::make_shared<GetState>();
+ try {
+ restbed::Http::async(req,
+ [=](const std::shared_ptr<restbed::Request>& req,
+ const std::shared_ptr<restbed::Response>& reply) {
+ auto code = reply->get_status_code();
+
+ if (code == 200) {
+ try {
+ // One JSON value per '\n'-terminated line.
+ while (restbed::Http::is_open(req) and not *finished and not state->stop) {
+ restbed::Http::fetch("\n", reply);
+ if (*finished or state->stop)
+ break;
+ std::string body;
+ reply->get_body(body);
+ reply->set_body(""); // Reset the body for the next fetch
+
+ std::string err;
+ Json::Value json;
+ Json::CharReaderBuilder rbuilder;
+ auto* char_data = reinterpret_cast<const char*>(&body[0]);
+ auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+ if (reader->parse(char_data, char_data + body.size(), &json, &err)) {
+ auto value = std::make_shared<Value>(json);
+ if ((not filter or filter(*value)) and cb) {
+ // Hand the value to the DHT loop thread.
+ std::lock_guard<std::mutex> lock(lockCallbacks);
+ callbacks_.emplace_back([cb, value, state]() {
+ if (not state->stop and not cb({value}))
+ state->stop = true;
+ });
+ loopSignal_();
+ }
+ } else {
+ state->ok = false;
+ }
+ }
+ } catch (std::runtime_error& e) { }
+ } else {
+ state->ok = false;
+ }
+ }).wait();
+ } catch(const std::exception& e) {
+ state->ok = false;
+ }
+ if (donecb) {
+ // Queue completion on the DHT loop thread as well.
+ std::lock_guard<std::mutex> lock(lockCallbacks);
+ callbacks_.emplace_back([=](){
+ donecb(state->ok, {});
+ state->stop = true;
+ });
+ loopSignal_();
+ }
+ if (!state->ok) {
+ // Connection failed, update connectivity
+ opFailed();
+ }
+ *finished = true;
+ });
+ {
+ std::lock_guard<std::mutex> lock(lockOperations_);
+ operations_.emplace_back(std::move(o));
+ }
+}
+
+/**
+ * Put a value on the proxy. Null values fail immediately; values without
+ * an id get a random one. Permanent puts are additionally registered in
+ * searches_ with a scheduler job that re-sends the put slightly before
+ * the proxy-side timeout (OP_TIMEOUT - OP_MARGIN), for as long as the
+ * entry exists. The actual network request is done by doPut().
+ */
+void
+DhtProxyClient::put(const InfoHash& key, Sp<Value> val, DoneCallback cb, time_point created, bool permanent)
+{
+ DHT_LOG.d(key, "[search %s]: put", key.to_c_str());
+ scheduler.syncTime();
+ if (not val) {
+ if (cb) cb(false, {});
+ return;
+ }
+ if (val->id == Value::INVALID_ID) {
+ // Give the value a random id so the proxy can address it.
+ crypto::random_device rdev;
+ std::uniform_int_distribution<Value::Id> rand_id {};
+ val->id = rand_id(rdev);
+ }
+ if (permanent) {
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto id = val->id;
+ auto search = searches_.emplace(key, ProxySearch{}).first;
+ auto nextRefresh = scheduler.time() + proxy::OP_TIMEOUT - proxy::OP_MARGIN;
+ auto ok = std::make_shared<std::atomic_bool>(false);
+ // Replace any previous permanent put with the same id.
+ search->second.puts.erase(id);
+ search->second.puts.emplace(id, PermanentPut {val, scheduler.add(nextRefresh, [this, key, id, ok]{
+ // Refresh job: re-send the put and reschedule itself.
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto s = searches_.find(key);
+ if (s != searches_.end()) {
+ auto p = s->second.puts.find(id);
+ if (p != s->second.puts.end()) {
+ doPut(key, p->second.value,
+ [ok](bool result, const std::vector<std::shared_ptr<dht::Node> >&){
+ *ok = result;
+ }, time_point::max(), true);
+ scheduler.edit(p->second.refreshJob, scheduler.time() + proxy::OP_TIMEOUT - proxy::OP_MARGIN);
+ }
+ }
+ }), ok});
+ }
+ doPut(key, val, std::move(cb), created, permanent);
+}
+
+/**
+ * Send a single PUT (HTTP POST) to the proxy for `key`/`val`.
+ * Permanent puts carry a "permanent" field (the push parameters when push
+ * notifications are enabled, `true` otherwise). Completion is reported to
+ * `cb` on the DHT loop thread; a failed request triggers opFailed().
+ */
+void
+DhtProxyClient::doPut(const InfoHash& key, Sp<Value> val, DoneCallback cb, time_point /*created*/, bool permanent)
+{
+    DHT_LOG.d(key, "[search %s] performing put of %s", key.to_c_str(), val->toString().c_str());
+    restbed::Uri uri(proxy::HTTP_PROTO + serverHost_ + "/" + key.toString());
+    auto req = std::make_shared<restbed::Request>(uri);
+    req->set_method("POST");
+
+    auto json = val->toJson();
+    if (permanent) {
+        if (deviceKey_.empty()) {
+            json["permanent"] = true;
+        } else {
+#if OPENDHT_PUSH_NOTIFICATIONS
+            // With push notifications, send the push parameters so the
+            // proxy can wake this client up to refresh the put.
+            Json::Value refresh;
+            getPushRequest(refresh);
+            json["permanent"] = refresh;
+#else
+            json["permanent"] = true;
+#endif
+        }
+    }
+    Json::StreamWriterBuilder wbuilder;
+    wbuilder["commentStyle"] = "None";
+    wbuilder["indentation"] = "";
+    auto body = Json::writeString(wbuilder, json) + "\n";
+    req->set_body(body);
+    req->set_header("Content-Length", std::to_string(body.size()));
+
+    auto finished = std::make_shared<std::atomic_bool>(false);
+    Operation o;
+    o.req = req;
+    o.finished = finished;
+    o.thread = std::thread([=](){
+        auto ok = std::make_shared<std::atomic_bool>(true);
+        try {
+            restbed::Http::async(req,
+                [ok](const std::shared_ptr<restbed::Request>& /*req*/,
+                     const std::shared_ptr<restbed::Response>& reply) {
+                auto code = reply->get_status_code();
+
+                if (code == 200) {
+                    restbed::Http::fetch("\n", reply);
+                    std::string body;
+                    reply->get_body(body);
+                    reply->set_body(""); // Reset the body for the next fetch
+
+                    // The reply must be valid JSON, otherwise the put failed.
+                    try {
+                        std::string err;
+                        Json::Value json;
+                        Json::CharReaderBuilder rbuilder;
+                        auto* char_data = reinterpret_cast<const char*>(&body[0]);
+                        auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+                        if (not reader->parse(char_data, char_data + body.size(), &json, &err))
+                            *ok = false;
+                    } catch (...) {
+                        *ok = false;
+                    }
+                } else {
+                    *ok = false;
+                }
+            }).wait();
+        } catch(const std::exception& e) {
+            *ok = false;
+        }
+        if (cb) {
+            // Report completion on the DHT loop thread.
+            std::lock_guard<std::mutex> lock(lockCallbacks);
+            callbacks_.emplace_back([=](){
+                cb(*ok, {});
+            });
+            loopSignal_();
+        }
+        // BUGFIX: this previously tested `!ok` — the shared_ptr itself,
+        // which is never null — so a failed put never updated connectivity.
+        // Test the pointed-to flag instead.
+        if (not *ok) {
+            // Connection failed, update connectivity
+            opFailed();
+        }
+        *finished = true;
+    });
+    {
+        std::lock_guard<std::mutex> lock(lockOperations_);
+        operations_.emplace_back(std::move(o));
+    }
+}
+
+/**
+ * Get data currently being put at the given hash.
+ * Returns a copy of the value pointers of every permanent put on `key`.
+ */
+std::vector<Sp<Value>>
+DhtProxyClient::getPut(const InfoHash& key) {
+    // searches_ is mutated under searchLock_ (put/doListen paths);
+    // this read was previously unsynchronized — guard it too.
+    std::lock_guard<std::mutex> lock(searchLock_);
+    std::vector<Sp<Value>> ret;
+    auto search = searches_.find(key);
+    if (search != searches_.end()) {
+        ret.reserve(search->second.puts.size());
+        for (const auto& put : search->second.puts)
+            ret.emplace_back(put.second.value);
+    }
+    return ret;
+}
+
+/**
+ * Get data currently being put at the given hash with the given id.
+ * Returns an empty pointer when no such permanent put exists.
+ */
+Sp<Value>
+DhtProxyClient::getPut(const InfoHash& key, const Value::Id& id) {
+    // Guard searches_ like every writer does (previously unsynchronized).
+    std::lock_guard<std::mutex> lock(searchLock_);
+    auto search = searches_.find(key);
+    if (search == searches_.end())
+        return {};
+    auto val = search->second.puts.find(id);
+    if (val == search->second.puts.end())
+        return {};
+    return val->second.value;
+}
+
+/**
+ * Stop any put/announce operation at the given location,
+ * for the value with the given id.
+ * Returns true if a permanent put was actually removed.
+ */
+bool
+DhtProxyClient::cancelPut(const InfoHash& key, const Value::Id& id)
+{
+    // searches_ is written under searchLock_ elsewhere; take it here too
+    // (this erase was previously unsynchronized against put()).
+    std::lock_guard<std::mutex> lock(searchLock_);
+    auto search = searches_.find(key);
+    if (search == searches_.end())
+        return false;
+    DHT_LOG.d(key, "[search %s] cancel put", key.to_c_str());
+    return search->second.puts.erase(id) > 0;
+}
+
+/**
+ * Return the proxy-reported node stats for the requested family.
+ * stats4_/stats6_ are written by onProxyInfos() while holding
+ * lockCurrentProxyInfos_; this read was previously unguarded.
+ */
+NodeStats
+DhtProxyClient::getNodesStats(sa_family_t af) const
+{
+    std::lock_guard<std::mutex> l(lockCurrentProxyInfos_);
+    return af == AF_INET ? stats4_ : stats6_;
+}
+
+/**
+ * Query the proxy server's "/" endpoint for node information (node id,
+ * stats, public IP) over every resolved address of serverHost_, counting
+ * IPv4/IPv6 answers. Each reply is handed to onProxyInfos(); a family
+ * with zero answers gets an empty reply so its status is marked down.
+ * A fresh InfoState invalidates any still-running previous request.
+ */
+void
+DhtProxyClient::getProxyInfos()
+{
+ DHT_LOG.d("Requesting proxy server node information");
+ std::lock_guard<std::mutex> l(statusLock_);
+
+ auto infoState = std::make_shared<InfoState>();
+ if (infoState_)
+ infoState_->cancel = true;
+ infoState_ = infoState;
+
+ {
+ std::lock_guard<std::mutex> l(lockCurrentProxyInfos_);
+ if (statusIpv4_ == NodeStatus::Disconnected)
+ statusIpv4_ = NodeStatus::Connecting;
+ if (statusIpv6_ == NodeStatus::Disconnected)
+ statusIpv6_ = NodeStatus::Connecting;
+ }
+
+ // A node can have a Ipv4 and a Ipv6. So, we need to retrieve all public ips
+ auto serverHost = serverHost_;
+
+ // Try to contact the proxy and set the status to connected when done.
+ // will change the connectivity status
+ // NOTE(review): a previous status thread is detached rather than joined;
+ // it keeps running but its work is discarded via infoState_->cancel.
+ if (statusThread_.joinable()) {
+ try {
+ statusThread_.detach();
+ statusThread_ = {};
+ } catch (const std::exception& e) {
+ DHT_LOG.e("Error detaching thread: %s", e.what());
+ }
+ }
+ statusThread_ = std::thread([this, serverHost, infoState]{
+ try {
+ auto hostAndService = splitPort(serverHost);
+ auto resolved_proxies = SockAddr::resolve(hostAndService.first, hostAndService.second);
+ std::vector<std::future<Sp<restbed::Response>>> reqs;
+ reqs.reserve(resolved_proxies.size());
+ for (const auto& resolved_proxy: resolved_proxies) {
+ auto server = resolved_proxy.toString();
+ if (resolved_proxy.getFamily() == AF_INET6) {
+ // HACK restbed seems to not correctly handle directly http://[ipv6]
+ // See https://github.com/Corvusoft/restbed/issues/290.
+ server = serverHost;
+ }
+ restbed::Uri uri(proxy::HTTP_PROTO + server + "/");
+ auto req = std::make_shared<restbed::Request>(uri);
+ if (infoState->cancel)
+ return;
+ reqs.emplace_back(restbed::Http::async(req,
+ [this, resolved_proxy, infoState](
+ const std::shared_ptr<restbed::Request>&,
+ const std::shared_ptr<restbed::Response>& reply)
+ {
+ auto code = reply->get_status_code();
+ Json::Value proxyInfos;
+ if (code == 200) {
+ restbed::Http::fetch("\n", reply);
+ auto& state = *infoState;
+ if (state.cancel) return;
+ std::string body;
+ reply->get_body(body);
+
+ std::string err;
+ Json::CharReaderBuilder rbuilder;
+ auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+ try {
+ reader->parse(body.data(), body.data() + body.size(), &proxyInfos, &err);
+ } catch (...) {
+ return;
+ }
+ // Count answers per family so the fallback below knows
+ // which families got no reply at all.
+ auto family = resolved_proxy.getFamily();
+ if (family == AF_INET) state.ipv4++;
+ else if (family == AF_INET6) state.ipv6++;
+ if (not state.cancel)
+ onProxyInfos(proxyInfos, family);
+ }
+ }));
+ }
+ // Wait for all outstanding info requests to complete.
+ for (auto& r : reqs)
+ r.get();
+ reqs.clear();
+ } catch (const std::exception& e) {
+ DHT_LOG.e("Error sending proxy info request: %s", e.what());
+ }
+ const auto& state = *infoState;
+ if (state.cancel) return;
+ // No reply for a family: report it as down with an empty info object.
+ if (state.ipv4 == 0) onProxyInfos(Json::Value{}, AF_INET);
+ if (state.ipv6 == 0) onProxyInfos(Json::Value{}, AF_INET6);
+ });
+}
+
+/**
+ * Process one proxy info reply (or an empty object on failure) for the
+ * given family: update the node id, stats, per-family status and public
+ * address, then reschedule the next confirmation (15 min when connected,
+ * 1 min when fully disconnected) and restart listeners on reconnection.
+ */
+void
+DhtProxyClient::onProxyInfos(const Json::Value& proxyInfos, sa_family_t family)
+{
+ if (isDestroying_)
+ return;
+ std::lock_guard<std::mutex> l(lockCurrentProxyInfos_);
+ auto oldStatus = std::max(statusIpv4_, statusIpv6_);
+ auto& status = family == AF_INET ? statusIpv4_ : statusIpv6_;
+ if (not proxyInfos.isMember("node_id")) {
+ DHT_LOG.e("Proxy info request failed for %s", family == AF_INET ? "IPv4" : "IPv6");
+ status = NodeStatus::Disconnected;
+ } else {
+ DHT_LOG.d("Got proxy reply for %s", family == AF_INET ? "IPv4" : "IPv6");
+ try {
+ myid = InfoHash(proxyInfos["node_id"].asString());
+ stats4_ = NodeStats(proxyInfos["ipv4"]);
+ stats6_ = NodeStats(proxyInfos["ipv6"]);
+ // Status mirrors the proxy's own routing table health.
+ if (stats4_.good_nodes + stats6_.good_nodes)
+ status = NodeStatus::Connected;
+ else if (stats4_.dubious_nodes + stats6_.dubious_nodes)
+ status = NodeStatus::Connecting;
+ else
+ status = NodeStatus::Disconnected;
+
+ auto publicIp = parsePublicAddress(proxyInfos["public_ip"]);
+ auto publicFamily = publicIp.getFamily();
+ if (publicFamily == AF_INET)
+ publicAddressV4_ = publicIp;
+ else if (publicFamily == AF_INET6)
+ publicAddressV6_ = publicIp;
+ } catch (const std::exception& e) {
+ DHT_LOG.w("Error processing proxy infos: %s", e.what());
+ }
+ }
+
+ auto newStatus = std::max(statusIpv4_, statusIpv6_);
+ if (newStatus == NodeStatus::Connected) {
+ // Just (re)connected: restart the listeners right away.
+ if (oldStatus == NodeStatus::Disconnected || oldStatus == NodeStatus::Connecting) {
+ scheduler.edit(listenerRestart, scheduler.time());
+ }
+ scheduler.edit(nextProxyConfirmation, scheduler.time() + std::chrono::minutes(15));
+ }
+ else if (newStatus == NodeStatus::Disconnected) {
+ // Retry sooner while disconnected.
+ scheduler.edit(nextProxyConfirmation, scheduler.time() + std::chrono::minutes(1));
+ }
+ loopSignal_();
+}
+
+/**
+ * Parse the proxy-reported public address string (possibly carrying a
+ * ":port" suffix) into a SockAddr, mapped to IPv4 when applicable.
+ * Returns a default-constructed SockAddr when resolution fails.
+ */
+SockAddr
+DhtProxyClient::parsePublicAddress(const Json::Value& val)
+{
+    const auto hostAndService = splitPort(val.asString());
+    const auto resolved = SockAddr::resolve(hostAndService.first);
+    return resolved.empty() ? SockAddr{} : resolved.front().getMappedIPv4();
+}
+
+/**
+ * Return the public addresses learned from the proxy, filtered by family
+ * (AF_UNSPEC yields both, IPv6 first).
+ */
+std::vector<SockAddr>
+DhtProxyClient::getPublicAddress(sa_family_t family)
+{
+    std::lock_guard<std::mutex> l(lockCurrentProxyInfos_);
+    std::vector<SockAddr> addrs;
+    if (family != AF_INET and publicAddressV6_)
+        addrs.emplace_back(publicAddressV6_);
+    if (family != AF_INET6 and publicAddressV4_)
+        addrs.emplace_back(publicAddressV4_);
+    return addrs;
+}
+
+/**
+ * Register a listener on `key`; returns an opaque token for cancelListen().
+ * The actual network subscription is created by doListen() through the
+ * OperationCache callback.
+ * NOTE(review): searches_ is read/written here without searchLock_, unlike
+ * most other paths; taking the lock here would self-deadlock because
+ * ops.listen() invokes doListen() synchronously, which locks searchLock_
+ * itself — confirm callers serialize this on the DHT loop thread.
+ */
+size_t
+DhtProxyClient::listen(const InfoHash& key, ValueCallback cb, Value::Filter filter, Where where) {
+ DHT_LOG.d(key, "[search %s]: listen", key.to_c_str());
+ auto it = searches_.find(key);
+ if (it == searches_.end()) {
+ it = searches_.emplace(key, ProxySearch{}).first;
+ }
+ auto query = std::make_shared<Query>(Select{}, where);
+ auto token = it->second.ops.listen(cb, query, filter, [&](Sp<Query> /*q*/, ValueCallback vcb){
+ return doListen(key, vcb, filter);
+ });
+ return token;
+}
+
+/**
+ * Cancel the listener identified by the global token `gtoken`.
+ * The cancellation is logical and immediate in the operation cache; the
+ * network teardown (doCancelListen) is deferred to a lazily-created
+ * expiration job so shared listens outlive individual cancellations.
+ * Returns false when the search or token is unknown.
+ */
+bool
+DhtProxyClient::cancelListen(const InfoHash& key, size_t gtoken) {
+ scheduler.syncTime();
+ DHT_LOG.d(key, "[search %s]: cancelListen %zu", key.to_c_str(), gtoken);
+ auto it = searches_.find(key);
+ if (it == searches_.end())
+ return false;
+ auto& ops = it->second.ops;
+ bool canceled = ops.cancelListen(gtoken, scheduler.time());
+ if (not it->second.opExpirationJob) {
+ // Created once per search: expires cached operations and tears down
+ // the underlying network listeners when they are no longer shared.
+ it->second.opExpirationJob = scheduler.add(time_point::max(), [this,key](){
+ auto it = searches_.find(key);
+ if (it != searches_.end()) {
+ auto next = it->second.ops.expire(scheduler.time(), [this,key](size_t ltoken){
+ doCancelListen(key, ltoken);
+ });
+ if (next != time_point::max()) {
+ scheduler.edit(it->second.opExpirationJob, next);
+ }
+ }
+ });
+ }
+ scheduler.edit(it->second.opExpirationJob, ops.getExpiration());
+ loopSignal_();
+ return canceled;
+}
+
+
+/**
+ * Blocking worker for a LISTEN subscription: keeps a long-lived HTTP
+ * request open and parses one JSON value per received line, marshalling
+ * matches to the DHT loop thread through callbacks_. Runs until the
+ * connection closes or state->cancel is set; a non-cancelled failure
+ * clears state->ok and triggers opFailed().
+ */
+void
+DhtProxyClient::sendListen(const std::shared_ptr<restbed::Request>& req, const ValueCallback& cb, const Value::Filter& filter, const Sp<ListenState>& state) {
+ auto settings = std::make_shared<restbed::Settings>();
+ // Effectively disable the client-side timeout: a listen stays open.
+ std::chrono::milliseconds timeout(std::numeric_limits<int>::max());
+ settings->set_connection_timeout(timeout); // Avoid the client to close the socket after 5 seconds.
+ req->set_method("LISTEN");
+ try {
+ restbed::Http::async(req,
+ [this, filter, cb, state](const std::shared_ptr<restbed::Request>& req,
+ const std::shared_ptr<restbed::Response>& reply) {
+ auto code = reply->get_status_code();
+ if (code == 200) {
+ try {
+ while (restbed::Http::is_open(req) and not state->cancel) {
+ restbed::Http::fetch("\n", reply);
+ if (state->cancel)
+ break;
+ std::string body;
+ reply->get_body(body);
+ reply->set_body(""); // Reset the body for the next fetch
+
+ Json::Value json;
+ std::string err;
+ Json::CharReaderBuilder rbuilder;
+ auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+ if (reader->parse(body.data(), body.data() + body.size(), &json, &err)) {
+ // "expired" marks value-expiration notifications.
+ auto expired = json.get("expired", Json::Value(false)).asBool();
+ auto value = std::make_shared<Value>(json);
+ if ((not filter or filter(*value)) and cb) {
+ std::lock_guard<std::mutex> lock(lockCallbacks);
+ callbacks_.emplace_back([cb, value, state, expired]() {
+ if (not state->cancel and not cb({value}, expired))
+ state->cancel = true;
+ });
+ loopSignal_();
+ }
+ }
+ }
+ } catch (const std::exception& e) {
+ if (not state->cancel) {
+ DHT_LOG.w("Listen closed by the proxy server: %s", e.what());
+ state->ok = false;
+ }
+ }
+ } else {
+ state->ok = false;
+ }
+ }, settings).get();
+ } catch (const std::exception& e) {
+ state->ok = false;
+ }
+ auto& s = *state;
+ if (not s.ok and not s.cancel)
+ opFailed();
+}
+
+/**
+ * Blocking worker for a push-notification SUBSCRIBE: sends the device's
+ * push parameters and stores the proxy-assigned listen token into *token.
+ * A reply without a "token" member, or any non-cancelled failure, clears
+ * state->ok and triggers opFailed(). No-op when built without
+ * OPENDHT_PUSH_NOTIFICATIONS.
+ */
+void
+DhtProxyClient::sendSubscribe(const std::shared_ptr<restbed::Request>& req, const Sp<proxy::ListenToken>& token, const Sp<ListenState>& state) {
+#if OPENDHT_PUSH_NOTIFICATIONS
+ req->set_method("SUBSCRIBE");
+ try {
+ fillBodyToGetToken(req);
+ restbed::Http::async(req, [this,state, token](const std::shared_ptr<restbed::Request>&,
+ const std::shared_ptr<restbed::Response>& reply) {
+ auto code = reply->get_status_code();
+ if (code == 200) {
+ try {
+ restbed::Http::fetch("\n", reply);
+ std::string body;
+ reply->get_body(body);
+
+ std::string err;
+ Json::Value json;
+ Json::CharReaderBuilder rbuilder;
+ auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+ if (reader->parse(body.data(), body.data() + body.size(), &json, &err)) {
+ if (not json.isMember("token")) {
+ state->ok = false;
+ return;
+ }
+ // Remember the token: it identifies this listener in
+ // later push notifications and UNSUBSCRIBE requests.
+ *token = unpackId(json, "token");
+ }
+ } catch (const std::exception& e) {
+ if (not state->cancel) {
+ DHT_LOG.e("sendSubscribe: error: %s", e.what());
+ state->ok = false;
+ }
+ }
+ } else {
+ state->ok = false;
+ }
+ }).get();
+ } catch(const std::exception& e) {
+ state->ok = false;
+ }
+ auto& s = *state;
+ if (not s.ok and not s.cancel)
+ opFailed();
+#endif
+}
+
+/**
+ * Create the actual network subscription for `key`: allocate a local
+ * listener token, set up the value cache and its expiration job, then
+ * spawn a worker thread running either sendSubscribe (push mode, when a
+ * device key is set) or sendListen (streaming mode).
+ * Returns the local token, or 0 when the search does not exist.
+ */
+size_t
+DhtProxyClient::doListen(const InfoHash& key, ValueCallback cb, Value::Filter filter/*, Where where*/)
+{
+ scheduler.syncTime();
+ restbed::Uri uri(proxy::HTTP_PROTO + serverHost_ + "/" + key.toString());
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto search = searches_.find(key);
+ if (search == searches_.end()) {
+ DHT_LOG.e(key, "[search %s] listen: search not found", key.to_c_str());
+ return 0;
+ }
+ DHT_LOG.d(key, "[search %s] sending %s", key.to_c_str(), deviceKey_.empty() ? "listen" : "subscribe");
+
+ auto req = std::make_shared<restbed::Request>(uri);
+ auto token = ++listenerToken_;
+ auto l = search->second.listeners.find(token);
+ if (l == search->second.listeners.end()) {
+ auto f = filter;
+ // New listener: its cache-expiration job re-arms itself with the
+ // next expiration returned by the value cache.
+ l = search->second.listeners.emplace(token, Listener {
+ ValueCache(cb), scheduler.add(time_point::max(), [this, key, token]{
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto s = searches_.find(key);
+ if (s == searches_.end()) {
+ return;
+ }
+ auto l = s->second.listeners.find(token);
+ if (l == s->second.listeners.end()) {
+ return;
+ }
+ auto next = l->second.cache.expireValues(scheduler.time());
+ scheduler.edit(l->second.cacheExpirationJob, next);
+ }), req, std::move(f)
+ }).first;
+ } else {
+ // Reusing an existing entry: cancel its previous network state.
+ if (l->second.state)
+ l->second.state->cancel = true;
+ }
+
+ auto state = std::make_shared<ListenState>();
+ l->second.state = state;
+ // The callback feeds received (or expired) values into the per-listener
+ // cache on the DHT loop thread, then re-arms the expiration job.
+ l->second.cb = [this,key,token,state](const std::vector<Sp<Value>>& values, bool expired) {
+ if (state->cancel)
+ return false;
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto s = searches_.find(key);
+ if (s == searches_.end()) {
+ return false;
+ }
+ auto l = s->second.listeners.find(token);
+ if (l == s->second.listeners.end()) {
+ return false;
+ }
+ const std::vector<Sp<Value>> new_values_empty;
+ std::vector<Value::Id> expired_ids;
+ if (expired) {
+ expired_ids.reserve(values.size());
+ for (const auto& v : values)
+ expired_ids.emplace_back(v->id);
+ }
+ auto next = l->second.cache.onValues(expired ? new_values_empty : values, std::vector<Value::Id>{}, expired_ids, types, scheduler.time());
+ scheduler.edit(l->second.cacheExpirationJob, next);
+ loopSignal_();
+ return true;
+ };
+
+ auto pushNotifToken = std::make_shared<proxy::ListenToken>(0);
+ auto vcb = l->second.cb;
+ l->second.pushNotifToken = pushNotifToken;
+ l->second.req = req;
+
+ if (not deviceKey_.empty()) {
+ // Relaunch push listeners even if a timeout is not received (if the proxy crash for any reason)
+ l->second.refreshJob = scheduler.add(scheduler.time() + proxy::OP_TIMEOUT - proxy::OP_MARGIN, [this, key, token, state] {
+ if (state->cancel)
+ return;
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto s = searches_.find(key);
+ if (s != searches_.end()) {
+ auto l = s->second.listeners.find(token);
+ if (l != s->second.listeners.end()) {
+ resubscribe(key, l->second);
+ }
+ }
+ });
+ l->second.thread = std::thread([this, req,pushNotifToken, state](){
+ sendSubscribe(req,pushNotifToken,state);
+ });
+ } else {
+ l->second.thread = std::thread([this, req, vcb, filter, state]{
+ sendListen(req,vcb,filter,state);
+ });
+ }
+ return token;
+}
+
+/**
+ * Tear down the network side of the listener `ltoken` on `key`.
+ * Push mode: wait for the subscribe thread (to have a valid token), then
+ * fire-and-forget an UNSUBSCRIBE request. Streaming mode: close the HTTP
+ * connection and join the listen thread. The search itself is erased
+ * when its last listener goes away. Returns false if key/token unknown.
+ */
+bool
+DhtProxyClient::doCancelListen(const InfoHash& key, size_t ltoken)
+{
+ std::lock_guard<std::mutex> lock(searchLock_);
+
+ auto search = searches_.find(key);
+ if (search == searches_.end())
+ return false;
+
+ auto it = search->second.listeners.find(ltoken);
+ if (it == search->second.listeners.end())
+ return false;
+
+ DHT_LOG.d(key, "[search %s] cancel listen", key.to_c_str());
+
+ auto& listener = it->second;
+ listener.state->cancel = true;
+ if (not deviceKey_.empty()) {
+ // First, be sure to have a token
+ if (listener.thread.joinable()) {
+ listener.thread.join();
+ }
+ // UNSUBSCRIBE
+ restbed::Uri uri(proxy::HTTP_PROTO + serverHost_ + "/" + key.toString());
+ auto req = std::make_shared<restbed::Request>(uri);
+ req->set_method("UNSUBSCRIBE");
+ // fill request body
+ Json::Value body;
+ body["key"] = deviceKey_;
+ body["client_id"] = pushClientId_;
+ body["token"] = std::to_string(*listener.pushNotifToken);
+ Json::StreamWriterBuilder wbuilder;
+ wbuilder["commentStyle"] = "None";
+ wbuilder["indentation"] = "";
+ auto content = Json::writeString(wbuilder, body) + "\n";
+ // Keep the payload on a single line for the proxy's line-based parser.
+ std::replace(content.begin(), content.end(), '\n', ' ');
+ req->set_body(content);
+ req->set_header("Content-Length", std::to_string(content.size()));
+ try {
+ // Fire-and-forget: the reply is ignored.
+ restbed::Http::async(req, [](const std::shared_ptr<restbed::Request>&, const std::shared_ptr<restbed::Response>&){});
+ } catch (const std::exception& e) {
+ DHT_LOG.w(key, "[search %s] cancelListen: Http::async failed: %s", key.to_c_str(), e.what());
+ }
+ } else {
+ // Just stop the request
+ if (listener.thread.joinable()) {
+ // Close connection to stop listener
+ if (listener.req)
+ restbed::Http::close(listener.req);
+ listener.thread.join();
+ }
+ }
+ search->second.listeners.erase(it);
+ DHT_LOG.d(key, "[search %s] cancelListen: %zu listener remaining", key.to_c_str(), search->second.listeners.size());
+ if (search->second.listeners.empty()) {
+ searches_.erase(search);
+ }
+
+ return true;
+}
+
+/**
+ * Handle a failed proxy request: mark both families disconnected and
+ * kick off a connectivity re-check, then wake the DHT loop.
+ */
+void
+DhtProxyClient::opFailed()
+{
+    DHT_LOG.e("Proxy request failed");
+    {
+        std::lock_guard<std::mutex> statusGuard(lockCurrentProxyInfos_);
+        statusIpv4_ = NodeStatus::Disconnected;
+        statusIpv6_ = NodeStatus::Disconnected;
+    }
+    getConnectivityStatus();
+    loopSignal_();
+}
+
+/**
+ * Re-probe the proxy for connectivity information, unless the client is
+ * being destroyed.
+ */
+void
+DhtProxyClient::getConnectivityStatus()
+{
+    if (isDestroying_)
+        return;
+    getProxyInfos();
+}
+
+/**
+ * After a (re)connection: re-send permanent puts whose last refresh
+ * failed, then restore listeners — in push mode by resubscribing only the
+ * failed ones, in streaming mode by restarting every listen thread.
+ */
+void
+DhtProxyClient::restartListeners()
+{
+ if (isDestroying_) return;
+ std::lock_guard<std::mutex> lock(searchLock_);
+ DHT_LOG.d("Refresh permanent puts");
+ for (auto& search : searches_) {
+ for (auto& put : search.second.puts) {
+ // Only re-send puts whose last refresh did not succeed.
+ if (!*put.second.ok) {
+ auto ok = put.second.ok;
+ doPut(search.first, put.second.value,
+ [ok](bool result, const std::vector<std::shared_ptr<dht::Node> >&){
+ *ok = result;
+ }, time_point::max(), true);
+ scheduler.edit(put.second.refreshJob, scheduler.time() + proxy::OP_TIMEOUT - proxy::OP_MARGIN);
+ }
+ }
+ }
+ if (not deviceKey_.empty()) {
+ DHT_LOG.d("resubscribe due to a connectivity change");
+ // Connectivity changed, refresh all subscribe
+ for (auto& search : searches_)
+ for (auto& listener : search.second.listeners)
+ if (!listener.second.state->ok)
+ resubscribe(search.first, listener.second);
+ return;
+ }
+ DHT_LOG.d("Restarting listeners");
+ for (auto& search: searches_) {
+ for (auto& l: search.second.listeners) {
+ auto& listener = l.second;
+ auto state = listener.state;
+ // Stop the old worker thread before restarting the listen.
+ if (listener.thread.joinable()) {
+ state->cancel = true;
+ if (listener.req)
+ restbed::Http::close(listener.req);
+ listener.thread.join();
+ }
+ // Redo listen
+ state->cancel = false;
+ state->ok = true;
+ auto filter = listener.filter;
+ auto cb = listener.cb;
+ restbed::Uri uri(proxy::HTTP_PROTO + serverHost_ + "/" + search.first.toString());
+ auto req = std::make_shared<restbed::Request>(uri);
+ req->set_method("LISTEN");
+ listener.req = req;
+ listener.thread = std::thread([this, req, cb, filter, state]() {
+ sendListen(req, cb, filter, state);
+ });
+ }
+ }
+}
+
+/**
+ * React to an incoming push notification (push builds only).
+ * "timeout" notifications carry the key of an expiring operation: with a
+ * "vid" the matching permanent put is refreshed immediately, otherwise
+ * all listeners on that key are resubscribed. Other notifications carry
+ * a listen "token": the matching listeners' values are re-fetched with a
+ * one-shot get(). Receiving any push also proves the proxy is reachable.
+ */
+void
+DhtProxyClient::pushNotificationReceived(const std::map<std::string, std::string>& notification)
+{
+#if OPENDHT_PUSH_NOTIFICATIONS
+ scheduler.syncTime();
+ {
+ // If a push notification is received, the proxy is up and running
+ std::lock_guard<std::mutex> l(lockCurrentProxyInfos_);
+ statusIpv4_ = NodeStatus::Connected;
+ statusIpv6_ = NodeStatus::Connected;
+ }
+ try {
+ std::lock_guard<std::mutex> lock(searchLock_);
+ auto timeout = notification.find("timeout");
+ if (timeout != notification.cend()) {
+ InfoHash key(timeout->second);
+ auto& search = searches_.at(key);
+ auto vidIt = notification.find("vid");
+ if (vidIt != notification.end()) {
+ // Refresh put
+ auto vid = std::stoull(vidIt->second);
+ auto& put = search.puts.at(vid);
+ scheduler.edit(put.refreshJob, scheduler.time());
+ loopSignal_();
+ } else {
+ // Refresh listen
+ for (auto& list : search.listeners)
+ resubscribe(key, list.second);
+ }
+ } else {
+ auto token = std::stoull(notification.at("token"));
+ for (auto& search: searches_) {
+ for (auto& list : search.second.listeners) {
+ if (*list.second.pushNotifToken != token or list.second.state->cancel)
+ continue;
+ DHT_LOG.d(search.first, "[search %s] handling push notification", search.first.to_c_str());
+ auto cb = list.second.cb;
+ auto filter = list.second.filter;
+ // Pull the new values with a regular get and feed them
+ // to the listener callback as non-expired values.
+ get(search.first, [cb](const std::vector<Sp<Value>>& vals) {
+ cb(vals, false);
+ return true;
+ }, DoneCallbackSimple{}, std::move(filter));
+ }
+ }
+ }
+ } catch (const std::exception& e) {
+ DHT_LOG.e("Error handling push notification: %s", e.what());
+ }
+#endif
+}
+
+/**
+ * Re-create the push subscription for one listener: join the previous
+ * worker thread, reset its shared state and token, reschedule the
+ * periodic refresh, and start a new sendSubscribe thread. No-op when
+ * push notifications are disabled or no device key is configured.
+ */
+void
+DhtProxyClient::resubscribe(const InfoHash& key, Listener& listener)
+{
+#if OPENDHT_PUSH_NOTIFICATIONS
+ if (deviceKey_.empty()) return;
+ scheduler.syncTime();
+ DHT_LOG.d(key, "[search %s] resubscribe push listener", key.to_c_str());
+ // Subscribe
+ restbed::Uri uri(proxy::HTTP_PROTO + serverHost_ + "/" + key.toString());
+ auto req = std::make_shared<restbed::Request>(uri);
+ req->set_method("SUBSCRIBE");
+
+ auto pushNotifToken = std::make_shared<proxy::ListenToken>(0);
+ auto state = listener.state;
+ // Wait for the previous subscribe thread before reusing the listener.
+ if (listener.thread.joinable())
+ listener.thread.join();
+ state->cancel = false;
+ state->ok = true;
+ listener.req = req;
+ listener.pushNotifToken = pushNotifToken;
+ scheduler.edit(listener.refreshJob, scheduler.time() + proxy::OP_TIMEOUT - proxy::OP_MARGIN);
+ listener.thread = std::thread([this, req, pushNotifToken, state]() {
+ sendSubscribe(req, pushNotifToken, state);
+ });
+#endif
+}
+
+#if OPENDHT_PUSH_NOTIFICATIONS
+/**
+ * Fill `body` with the push-notification parameters identifying this
+ * client: device key, client id and (when the target platform is known
+ * at compile time) the platform name.
+ */
+void
+DhtProxyClient::getPushRequest(Json::Value& body) const
+{
+ body["key"] = deviceKey_;
+ body["client_id"] = pushClientId_;
+#ifdef __ANDROID__
+ body["platform"] = "android";
+#endif
+#ifdef __APPLE__
+ body["platform"] = "apple";
+#endif
+}
+
+/**
+ * Build the body of a SUBSCRIBE/UNSUBSCRIBE request and set the request's
+ * Content-Length header accordingly.
+ * @param req   request to fill
+ * @param token existing proxy listen token; 0 means "not yet assigned"
+ *              (presumably defaulted in the header declaration — the call
+ *              site in sendSubscribe passes only `req`).
+ */
+void
+DhtProxyClient::fillBodyToGetToken(std::shared_ptr<restbed::Request> req, unsigned token)
+{
+ // Fill body with
+ // {
+ // "key":"device_key",
+ // "token": xxx
+ // }
+ Json::Value body;
+ getPushRequest(body);
+ if (token > 0)
+ body["token"] = token;
+ Json::StreamWriterBuilder wbuilder;
+ wbuilder["commentStyle"] = "None";
+ wbuilder["indentation"] = "";
+ auto content = Json::writeString(wbuilder, body) + "\n";
+ // Keep the payload on one line for the proxy's line-oriented parser;
+ // note this also turns the trailing '\n' just appended into a space.
+ std::replace(content.begin(), content.end(), '\n', ' ');
+ req->set_body(content);
+ req->set_header("Content-Length", std::to_string(content.size()));
+}
+#endif // OPENDHT_PUSH_NOTIFICATIONS
+
+} // namespace dht
+
+#endif // OPENDHT_PROXY_CLIENT
--- /dev/null
+/*
+ * Copyright (C) 2017-2018 Savoir-faire Linux Inc.
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ * Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#if OPENDHT_PROXY_SERVER
+#include "dht_proxy_server.h"
+
+#include "default_types.h"
+#include "dhtrunner.h"
+
+#include <msgpack.hpp>
+#include <json/json.h>
+
+#include <chrono>
+#include <functional>
+#include <limits>
+#include <iostream>
+
+using namespace std::placeholders;
+
+namespace dht {
+
+// Server-side record of a client's permanent (auto-refreshed) put.
+struct DhtProxyServer::PermanentPut {
+ time_point expiration;            // when the put lapses unless refreshed
+ std::string pushToken;            // push-notification target of the owner
+ std::string clientId;             // owner's client identifier
+ Sp<Scheduler::Job> expireJob;     // job removing the put at expiration
+ Sp<Scheduler::Job> expireNotifyJob; // job notifying the owner before expiry — presumably; confirm against scheduling code
+};
+// All permanent puts registered under a single hash.
+struct DhtProxyServer::SearchPuts {
+ std::map<dht::Value::Id, PermanentPut> puts;
+};
+
+// Interval between two statistics refresh/print cycles.
+constexpr const std::chrono::minutes PRINT_STATS_PERIOD {2};
+
+/**
+ * Start the proxy server: publishes the REST resources ("/" for node
+ * info/stats, "/{hash}" for get/put/listen/subscribe, "/{hash}/{value}"
+ * for filtered gets) on a restbed service running in its own thread,
+ * plus a reaper thread for closed listeners, a scheduler thread, and a
+ * periodic stats job.
+ * @param dht        running DHT node to proxy (must be non-null)
+ * @param port       TCP port to serve on
+ * @param pushServer optional push-notification server address
+ */
+DhtProxyServer::DhtProxyServer(std::shared_ptr<DhtRunner> dht, in_port_t port , const std::string& pushServer)
+: dht_(dht) , pushServer_(pushServer)
+{
+ if (not dht_)
+ throw std::invalid_argument("A DHT instance must be provided");
+ // NOTE in c++14, use make_unique
+ service_ = std::unique_ptr<restbed::Service>(new restbed::Service());
+
+ std::cout << "Running DHT proxy server on port " << port << std::endl;
+ // NOTE(review): the message below reads "built OpenDHT built" — typo in
+ // a runtime string, left as-is here.
+ if (not pushServer.empty()) {
+#if !OPENDHT_PUSH_NOTIFICATIONS
+ std::cerr << "Push server defined but built OpenDHT built without push notification support" << std::endl;
+#else
+ std::cout << "Using push notification server: " << pushServer << std::endl;
+#endif
+ }
+
+ server_thread = std::thread([this, port]() {
+ // Create endpoints
+ auto resource = std::make_shared<restbed::Resource>();
+ resource->set_path("/");
+ resource->set_method_handler("GET", std::bind(&DhtProxyServer::getNodeInfo, this, _1));
+ resource->set_method_handler("STATS", std::bind(&DhtProxyServer::getStats, this, _1));
+ service_->publish(resource);
+ resource = std::make_shared<restbed::Resource>();
+ resource->set_path("/{hash: .*}");
+ resource->set_method_handler("GET", std::bind(&DhtProxyServer::get, this, _1));
+ resource->set_method_handler("LISTEN", [this](const Sp<restbed::Session>& session) mutable { listen(session); } );
+#if OPENDHT_PUSH_NOTIFICATIONS
+ resource->set_method_handler("SUBSCRIBE", [this](const Sp<restbed::Session>& session) mutable { subscribe(session); } );
+ resource->set_method_handler("UNSUBSCRIBE", [this](const Sp<restbed::Session>& session) mutable { unsubscribe(session); } );
+#endif //OPENDHT_PUSH_NOTIFICATIONS
+ resource->set_method_handler("POST", [this](const Sp<restbed::Session>& session) mutable { put(session); });
+#if OPENDHT_PROXY_SERVER_IDENTITY
+ resource->set_method_handler("SIGN", std::bind(&DhtProxyServer::putSigned, this, _1));
+ resource->set_method_handler("ENCRYPT", std::bind(&DhtProxyServer::putEncrypted, this, _1));
+#endif // OPENDHT_PROXY_SERVER_IDENTITY
+ resource->set_method_handler("OPTIONS", std::bind(&DhtProxyServer::handleOptionsMethod, this, _1));
+ service_->publish(resource);
+ resource = std::make_shared<restbed::Resource>();
+ resource->set_path("/{hash: .*}/{value: .*}");
+ resource->set_method_handler("GET", std::bind(&DhtProxyServer::getFiltered, this, _1));
+ service_->publish(resource);
+
+ // Start server
+ auto settings = std::make_shared<restbed::Settings>();
+ settings->set_default_header("Content-Type", "application/json");
+ settings->set_default_header("Connection", "keep-alive");
+ settings->set_default_header("Access-Control-Allow-Origin", "*");
+ std::chrono::milliseconds timeout(std::numeric_limits<int>::max());
+ settings->set_connection_timeout(timeout); // there is a timeout, but really huge
+ settings->set_port(port);
+ // NOTE(review): hardware_concurrency() may return 0, in which case the
+ // unsigned subtraction wraps to a huge value that passes the `> 1`
+ // guard — worth clamping explicitly.
+ auto maxThreads = std::thread::hardware_concurrency() - 1;
+ settings->set_worker_limit(maxThreads > 1 ? maxThreads : 1);
+ lastStatsReset_ = clock::now();
+ try {
+ service_->start(settings);
+ } catch(std::system_error& e) {
+ std::cerr << "Error running server on port " << port << ": " << e.what() << std::endl;
+ }
+ });
+
+ listenThread_ = std::thread([this]() {
+ // Wait for the service to come up, then periodically reap listeners
+ // whose sessions were closed.
+ while (not service_->is_up() and not stopListeners) {
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ }
+ while (service_->is_up() and not stopListeners) {
+ removeClosedListeners();
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ }
+ // Remove last listeners
+ removeClosedListeners(false);
+ });
+ schedulerThread_ = std::thread([this]() {
+ while (not service_->is_up() and not stopListeners) {
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ }
+ // Run due jobs; sleep until the next job (or a notification).
+ while (service_->is_up() and not stopListeners) {
+ std::unique_lock<std::mutex> lock(schedulerLock_);
+ auto next = scheduler_.run();
+ if (next == time_point::max())
+ schedulerCv_.wait(lock);
+ else
+ schedulerCv_.wait_until(lock, next);
+ }
+ });
+ dht->forwardAllMessages(true);
+ // Periodic job refreshing the cached node info and stats.
+ printStatsJob_ = scheduler_.add(scheduler_.time() + PRINT_STATS_PERIOD, [this] {
+ if (stopListeners) return;
+ if (service_->is_up())
+ updateStats();
+ // Refresh stats cache
+ auto newInfo = dht_->getNodeInfo();
+ {
+ std::lock_guard<std::mutex> lck(statsMutex_);
+ nodeInfo_ = std::move(newInfo);
+ }
+ scheduler_.edit(printStatsJob_, scheduler_.time() + PRINT_STATS_PERIOD);
+ });
+}
+
+DhtProxyServer::~DhtProxyServer()
+{
+    // All teardown (REST service, sessions, worker threads) lives in stop().
+    stop();
+}
+
+void
+DhtProxyServer::stop()
+{
+    // Cancel the periodic stats job first so it does not reschedule itself.
+    if (printStatsJob_)
+        printStatsJob_->cancel();
+    service_->stop();
+    {
+        // Force-close every active LISTEN session so their loops can end.
+        std::lock_guard<std::mutex> lock(lockListener_);
+        for (auto& listener : currentListeners_)
+            listener.session->close();
+    }
+    stopListeners = true;
+    schedulerCv_.notify_all();
+    // The listen thread exits once no session remains open.
+    if (listenThread_.joinable())
+        listenThread_.join();
+    if (schedulerThread_.joinable())
+        schedulerThread_.join();
+    if (server_thread.joinable())
+        server_thread.join();
+}
+
+void
+DhtProxyServer::updateStats() const
+{
+    // Refresh the cached Stats structure: compute the request rate over the
+    // elapsed interval since the last reset, then snapshot container sizes.
+    auto now = clock::now();
+    auto last = lastStatsReset_.exchange(now);  // atomically restart the interval
+    auto count = requestNum_.exchange(0);       // atomically reset the request counter
+    auto dt = std::chrono::duration<double>(now - last);
+    stats_.requestRate = count / dt.count();
+#if OPENDHT_PUSH_NOTIFICATIONS
+    // NOTE(review): pushListeners_/puts_/currentListeners_ are read here
+    // without their mutexes — presumably tolerated because this runs on the
+    // scheduler thread; confirm before relying on exact counts.
+    stats_.pushListenersCount = pushListeners_.size();
+#endif
+    stats_.putCount = puts_.size();
+    stats_.listenCount = currentListeners_.size();
+    stats_.nodeInfo = nodeInfo_;
+}
+
+void
+DhtProxyServer::getNodeInfo(const Sp<restbed::Session>& session) const
+{
+    // GET "/": reply with the cached node information (ids, per-family node
+    // stats) plus the requester's public address, as one JSON object.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    session->fetch(content_length,
+        [this](const Sp<restbed::Session>& s, const restbed::Bytes& /*b*/) mutable
+        {
+            try {
+                if (dht_) {
+                    Json::Value result;
+                    {
+                        std::lock_guard<std::mutex> lck(statsMutex_);
+                        if (nodeInfo_.ipv4.good_nodes == 0 && nodeInfo_.ipv6.good_nodes == 0) {
+                            // NOTE: we want to avoid the disconnected state as much as possible
+                            // So, if the node is disconnected, we should force the update of the cache
+                            // and reconnect as soon as possible
+                            // This should not happen much
+                            nodeInfo_ = dht_->getNodeInfo();
+                        }
+                        result = nodeInfo_.toJson();
+                    }
+                    result["public_ip"] = s->get_origin(); // [ipv6:ipv4]:port or ipv4:port
+                    // Compact (comment-free, unindented) JSON, newline-terminated.
+                    Json::StreamWriterBuilder wbuilder;
+                    wbuilder["commentStyle"] = "None";
+                    wbuilder["indentation"] = "";
+                    auto output = Json::writeString(wbuilder, result) + "\n";
+                    s->close(restbed::OK, output);
+                }
+                else
+                    s->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+void
+DhtProxyServer::getStats(const Sp<restbed::Session>& session) const
+{
+    // STATS "/": return the cached proxy statistics as compact JSON.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    session->fetch(content_length,
+        [this](const Sp<restbed::Session>& s, const restbed::Bytes& /*b*/) mutable
+        {
+            try {
+                if (dht_) {
+#ifdef OPENDHT_JSONCPP
+                    Json::StreamWriterBuilder wbuilder;
+                    wbuilder["commentStyle"] = "None";
+                    wbuilder["indentation"] = "";
+                    auto output = Json::writeString(wbuilder, stats_.toJson()) + "\n";
+                    s->close(restbed::OK, output);
+#else
+                    // Fix: restbed's status constants are uppercase (OK,
+                    // BAD_REQUEST, ...); restbed::NotFound does not exist and
+                    // failed to compile when OPENDHT_JSONCPP was unset.
+                    s->close(restbed::NOT_FOUND, "{\"err\":\"JSON not enabled on this instance\"}");
+#endif
+                }
+                else
+                    s->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+void
+DhtProxyServer::get(const Sp<restbed::Session>& session) const
+{
+    // GET "/{hash}": stream every value found at the key, one JSON object per
+    // line, over a kept-alive chunked response.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    session->fetch(content_length,
+        [=](const Sp<restbed::Session>& s, const restbed::Bytes& /*b* */)
+        {
+            try {
+                if (dht_) {
+                    // Parse the path as a hash; otherwise derive one from the
+                    // string via InfoHash::get().
+                    InfoHash infoHash(hash);
+                    if (!infoHash) {
+                        infoHash = InfoHash::get(hash);
+                    }
+                    // Send headers right away; values follow as they arrive.
+                    s->yield(restbed::OK, "", [=](const Sp<restbed::Session>&) {});
+                    dht_->get(infoHash, [s](const Sp<Value>& value) {
+                        if (s->is_closed()) return false;  // client gone: stop the query
+                        // Send values as soon as we get them
+                        Json::StreamWriterBuilder wbuilder;
+                        wbuilder["commentStyle"] = "None";
+                        wbuilder["indentation"] = "";
+                        auto output = Json::writeString(wbuilder, value->toJson()) + "\n";
+                        s->yield(output, [](const Sp<restbed::Session>& /*session*/){ });
+                        return true;
+                    }, [s](bool /*ok* */) {
+                        // Communication is finished
+                        if (not s->is_closed()) {
+                            s->close();
+                        }
+                    });
+                } else {
+                    s->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+                }
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+void
+DhtProxyServer::listen(const Sp<restbed::Session>& session)
+{
+    // LISTEN "/{hash}": keep the HTTP session open and stream every value
+    // (and expiry event) stored under the key until the client disconnects.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    InfoHash infoHash(hash);
+    if (!infoHash)
+        infoHash = InfoHash::get(hash);
+    session->fetch(content_length,
+        [=](const Sp<restbed::Session>& s, const restbed::Bytes& /*b* */)
+        {
+            try {
+                if (dht_) {
+                    // NOTE(review): infoHash is recomputed here, shadowing the
+                    // one captured above — redundant but harmless.
+                    InfoHash infoHash(hash);
+                    if (!infoHash) {
+                        infoHash = InfoHash::get(hash);
+                    }
+                    s->yield(restbed::OK);
+                    // Handle client deconnection
+                    // NOTE: for now, there is no handler, so we test the session in a thread
+                    // will be the case in restbed 5.0
+                    SessionToHashToken listener;
+                    listener.session = session;
+                    listener.hash = infoHash;
+                    // cache the session to avoid an incrementation of the shared_ptr's counter
+                    // else, the session->close() will not close the socket.
+                    auto cacheSession = std::weak_ptr<restbed::Session>(s);
+                    listener.token = dht_->listen(infoHash, [cacheSession](const std::vector<Sp<Value>>& values, bool expired) {
+                        auto s = cacheSession.lock();
+                        if (!s) return false;  // session destroyed: cancel the listen
+                        // Send values as soon as we get them
+                        if (!s->is_closed()) {
+                            Json::StreamWriterBuilder wbuilder;
+                            wbuilder["commentStyle"] = "None";
+                            wbuilder["indentation"] = "";
+                            for (const auto& value : values) {
+                                auto val = value->toJson();
+                                if (expired)
+                                    val["expired"] = true;  // flag values that just expired
+                                auto output = Json::writeString(wbuilder, val) + "\n";
+                                s->yield(output, [](const Sp<restbed::Session>&){ });
+                            }
+                        }
+                        return !s->is_closed();
+                    });
+                    {
+                        // Track the listener so removeClosedListeners() can
+                        // reap it once the session closes.
+                        std::lock_guard<std::mutex> lock(lockListener_);
+                        currentListeners_.emplace_back(std::move(listener));
+                    }
+                } else {
+                    session->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+                }
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+#if OPENDHT_PUSH_NOTIFICATIONS
+
+struct DhtProxyServer::Listener {
+    std::shared_ptr<proxy::ListenToken> token;  // token echoed to the client; shared with callbacks
+    std::string clientId;                       // client-supplied id; one listener per client per hash
+    std::future<size_t> internalToken;          // token of the underlying dht_->listen()
+    Sp<Scheduler::Job> expireJob;               // cancels the listener when its lease times out
+    Sp<Scheduler::Job> expireNotifyJob;         // push-notifies the client shortly before expiry
+};
+struct DhtProxyServer::PushListener {
+    // All push listeners registered under one push token, grouped by key.
+    std::map<InfoHash, std::vector<Listener>> listeners;
+    bool isAndroid;                             // selects gorush platform (2 = android, 1 = iOS)
+};
+
+void
+DhtProxyServer::subscribe(const std::shared_ptr<restbed::Session>& session)
+{
+    // SUBSCRIBE "/{hash}": register (or refresh) a push-notification listener
+    // for the key, identified by (push token, hash, client_id). Expiry and
+    // pre-expiry refresh notifications are driven by the scheduler.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    InfoHash infoHash(hash);
+    if (!infoHash)
+        infoHash = InfoHash::get(hash);
+    session->fetch(content_length,
+        [=](const std::shared_ptr<restbed::Session> s, const restbed::Bytes& b) mutable
+        {
+            try {
+                std::string err;
+                Json::Value root;
+                Json::CharReaderBuilder rbuilder;
+                auto* char_data = reinterpret_cast<const char*>(b.data());
+                auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+                if (!reader->parse(char_data, char_data + b.size(), &root, &err)) {
+                    s->close(restbed::BAD_REQUEST, "{\"err\":\"Incorrect JSON\"}");
+                    return;
+                }
+                // "key" carries the client's push (device) token.
+                auto pushToken = root["key"].asString();
+                if (pushToken.empty()) {
+                    s->close(restbed::BAD_REQUEST, "{\"err\":\"No token\"}");
+                    return;
+                }
+                auto tokenFromReq = unpackId(root, "token");
+                auto platform = root["platform"].asString();
+                auto isAndroid = platform == "android";
+                auto clientId = root.isMember("client_id") ? root["client_id"].asString() : std::string();
+
+                std::cout << "Subscribe " << infoHash << " token:" << tokenFromReq << " client:" << clientId << std::endl;
+
+                {
+                    // Take both mutexes atomically: the scheduler and the
+                    // listener table are updated together.
+                    std::lock(schedulerLock_, lockListener_);
+                    std::lock_guard<std::mutex> lk1(lockListener_, std::adopt_lock);
+                    std::lock_guard<std::mutex> lk2(schedulerLock_, std::adopt_lock);
+                    scheduler_.syncTime();
+                    auto timeout = scheduler_.time() + proxy::OP_TIMEOUT;
+                    // Check if listener is already present and refresh timeout if launched
+                    // One push listener per pushToken.infoHash.clientId
+                    auto pushListener = pushListeners_.emplace(pushToken, PushListener{}).first;
+                    auto listeners = pushListener->second.listeners.emplace(infoHash, std::vector<Listener>{}).first;
+                    for (auto& listener: listeners->second) {
+                        if (listener.clientId == clientId) {
+                            // Existing listener: update its token and push the
+                            // expiry / refresh jobs forward.
+                            *listener.token = tokenFromReq;
+                            scheduler_.edit(listener.expireJob, timeout);
+                            scheduler_.edit(listener.expireNotifyJob, timeout - proxy::OP_MARGIN);
+                            s->close(restbed::OK, "{\"token\": " + std::to_string(tokenFromReq) + "}\n");
+                            schedulerCv_.notify_one();
+                            return;
+                        }
+                    }
+                    listeners->second.emplace_back(Listener{});
+                    auto& listener = listeners->second.back();
+                    listener.clientId = clientId;
+
+                    // New listener
+                    pushListener->second.isAndroid = isAndroid;
+
+                    // The listener is not found, so add it.
+                    auto token = std::make_shared<proxy::ListenToken>(tokenFromReq);
+                    listener.token = token;
+                    listener.internalToken = dht_->listen(infoHash,
+                        [this, token, infoHash, pushToken, isAndroid, clientId](std::vector<std::shared_ptr<Value>> /*value*/) {
+                            // Values are not sent over HTTP: notify the client
+                            // by push so it can fetch them itself.
+                            Json::Value json;
+                            json["key"] = infoHash.toString();
+                            json["to"] = clientId;
+                            json["token"] = std::to_string(*token);
+                            sendPushNotification(pushToken, json, isAndroid);
+                            return true;
+                        }
+                    );
+                    // Drop the listener if the client never refreshes it.
+                    listener.expireJob = scheduler_.add(timeout,
+                        [this, clientId, infoHash, pushToken] {
+                            cancelPushListen(pushToken, infoHash, clientId);
+                        }
+                    );
+                    // Shortly before expiry, ask the client (by push) to renew.
+                    listener.expireNotifyJob = scheduler_.add(timeout - proxy::OP_MARGIN,
+                        [this, token, infoHash, pushToken, isAndroid, clientId] {
+                            std::cout << "Listener: sending refresh " << infoHash << std::endl;
+                            Json::Value json;
+                            json["timeout"] = infoHash.toString();
+                            json["to"] = clientId;
+                            json["token"] = std::to_string(*token);
+                            sendPushNotification(pushToken, json, isAndroid);
+                        }
+                    );
+                }
+                schedulerCv_.notify_one();
+                s->close(restbed::OK, "{\"token\": " + std::to_string(tokenFromReq) + "}\n");
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+void
+DhtProxyServer::unsubscribe(const std::shared_ptr<restbed::Session>& session)
+{
+    // UNSUBSCRIBE "/{hash}": remove a previously registered push listener
+    // identified by (push token, hash, client_id).
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    InfoHash infoHash(hash);
+    if (!infoHash)
+        infoHash = InfoHash::get(hash);
+    session->fetch(content_length,
+        [=](const std::shared_ptr<restbed::Session> s, const restbed::Bytes& b)
+        {
+            try {
+                std::string err;
+                Json::Value root;
+                Json::CharReaderBuilder rbuilder;
+                auto* char_data = reinterpret_cast<const char*>(b.data());
+                auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+                if (!reader->parse(char_data, char_data + b.size(), &root, &err)) {
+                    s->close(restbed::BAD_REQUEST, "{\"err\":\"Incorrect JSON\"}");
+                    return;
+                }
+                auto pushToken = root["key"].asString();
+                if (pushToken.empty()) {
+                    // Fix: previously returned without closing the session,
+                    // leaving the client waiting for a reply (subscribe()
+                    // answers BAD_REQUEST in the same situation).
+                    s->close(restbed::BAD_REQUEST, "{\"err\":\"No token\"}");
+                    return;
+                }
+                auto clientId = root["client_id"].asString();
+
+                cancelPushListen(pushToken, infoHash, clientId);
+                s->close(restbed::OK);
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+void
+DhtProxyServer::cancelPushListen(const std::string& pushToken, const dht::InfoHash& key, const std::string& clientId)
+{
+    // Remove the push listener matching (pushToken, key, clientId), cancel
+    // its DHT-side listen, and prune containers that become empty.
+    std::cout << "cancelPushListen: " << key << " clientId:" << clientId << std::endl;
+    std::lock_guard<std::mutex> lock(lockListener_);
+    auto pushIt = pushListeners_.find(pushToken);
+    if (pushIt == pushListeners_.end())
+        return;
+    auto hashIt = pushIt->second.listeners.find(key);
+    if (hashIt == pushIt->second.listeners.end())
+        return;
+    auto& listenerVec = hashIt->second;
+    for (auto it = listenerVec.begin(); it != listenerVec.end();) {
+        if (it->clientId != clientId) {
+            ++it;
+            continue;
+        }
+        if (dht_)
+            dht_->cancelListen(key, std::move(it->internalToken));
+        it = listenerVec.erase(it);
+    }
+    if (listenerVec.empty())
+        pushIt->second.listeners.erase(hashIt);
+    if (pushIt->second.listeners.empty())
+        pushListeners_.erase(pushIt);
+}
+
+void
+DhtProxyServer::sendPushNotification(const std::string& token, const Json::Value& json, bool isAndroid) const
+{
+    // Relay a push notification through the configured push server.
+    // NOTE: see https://github.com/appleboy/gorush
+    restbed::Uri uri(proxy::HTTP_PROTO + pushServer_ + "/api/push");
+    auto httpRequest = std::make_shared<restbed::Request>(uri);
+    httpRequest->set_method("POST");
+
+    Json::Value notification(Json::objectValue);
+    Json::Value tokens(Json::arrayValue);
+    tokens[0] = token;
+    notification["tokens"] = tokens;
+    notification["platform"] = isAndroid ? 2 : 1;  // gorush: 1 = iOS, 2 = Android
+    notification["data"] = json;
+    notification["priority"] = "high";
+    notification["time_to_live"] = 600;
+
+    Json::Value notifications(Json::arrayValue);
+    notifications[0] = notification;
+    Json::Value content;
+    content["notifications"] = notifications;
+
+    // Compact JSON body.
+    Json::StreamWriterBuilder writer;
+    writer["commentStyle"] = "None";
+    writer["indentation"] = "";
+    auto body = Json::writeString(writer, content);
+
+    httpRequest->set_header("Content-Type", "application/json");
+    httpRequest->set_header("Accept", "*/*");
+    httpRequest->set_header("Host", pushServer_);
+    httpRequest->set_header("Content-Length", std::to_string(body.length()));
+    httpRequest->set_body(body);
+
+    // Fire-and-forget: no completion callback.
+    restbed::Http::async(httpRequest, {});
+}
+
+#endif //OPENDHT_PUSH_NOTIFICATIONS
+
+void
+DhtProxyServer::cancelPut(const InfoHash& key, Value::Id vid)
+{
+    // Stop re-announcing a permanent put and drop its bookkeeping entry.
+    std::cout << "cancelPut " << key << " " << vid << std::endl;
+    auto searchIt = puts_.find(key);
+    if (searchIt == puts_.end())
+        return;
+    auto& putsForKey = searchIt->second.puts;
+    auto putIt = putsForKey.find(vid);
+    if (putIt == putsForKey.end())
+        return;
+    if (dht_)
+        dht_->cancelPut(key, vid);
+    if (putIt->second.expireNotifyJob)
+        putIt->second.expireNotifyJob->cancel();
+    putsForKey.erase(putIt);
+    if (putsForKey.empty())
+        puts_.erase(searchIt);
+}
+
+void
+DhtProxyServer::put(const std::shared_ptr<restbed::Session>& session)
+{
+    // POST "/{hash}": store the JSON-described value under the key. A
+    // "permanent" member keeps the put alive on the proxy: scheduler jobs
+    // expire it and (optionally) push-notify the client to refresh.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    InfoHash infoHash(hash);
+    if (!infoHash)
+        infoHash = InfoHash::get(hash);
+
+    session->fetch(content_length,
+        [=](const std::shared_ptr<restbed::Session> s, const restbed::Bytes& b)
+        {
+            try {
+                if (dht_) {
+                    if(b.empty()) {
+                        std::string response("{\"err\":\"Missing parameters\"}");
+                        s->close(restbed::BAD_REQUEST, response);
+                    } else {
+                        std::string err;
+                        Json::Value root;
+                        Json::CharReaderBuilder rbuilder;
+                        auto* char_data = reinterpret_cast<const char*>(b.data());
+                        auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+                        if (reader->parse(char_data, char_data + b.size(), &root, &err)) {
+                            // Build the Value from json
+                            auto value = std::make_shared<Value>(root);
+                            bool permanent = root.isMember("permanent");
+                            std::cout << "Got put " << infoHash << " " << *value << " " << (permanent ? "permanent" : "") << std::endl;
+
+                            if (permanent) {
+                                // "permanent" may be an object carrying push
+                                // credentials used for refresh notifications.
+                                std::string pushToken, clientId, platform;
+                                auto& pVal = root["permanent"];
+                                if (pVal.isObject()) {
+                                    pushToken = pVal["key"].asString();
+                                    clientId = pVal["client_id"].asString();
+                                    platform = pVal["platform"].asString();
+                                }
+                                bool isAndroid = platform == "android";
+                                std::unique_lock<std::mutex> lock(schedulerLock_);
+                                scheduler_.syncTime();
+                                auto timeout = scheduler_.time() + proxy::OP_TIMEOUT;
+                                auto vid = value->id;
+                                auto sPuts = puts_.emplace(infoHash, SearchPuts{}).first;
+                                auto r = sPuts->second.puts.emplace(vid, PermanentPut{});
+                                auto& pput = r.first->second;
+                                if (r.second) {
+                                    // First put for this (hash, vid): schedule
+                                    // expiry and the optional refresh push.
+                                    pput.expireJob = scheduler_.add(timeout, [this, infoHash, vid]{
+                                        std::cout << "Permanent put expired: " << infoHash << " " << vid << std::endl;
+                                        cancelPut(infoHash, vid);
+                                    });
+#if OPENDHT_PUSH_NOTIFICATIONS
+                                    if (not pushToken.empty()) {
+                                        pput.expireNotifyJob = scheduler_.add(timeout - proxy::OP_MARGIN,
+                                            [this, infoHash, vid, pushToken, clientId, isAndroid]
+                                            {
+                                                std::cout << "Permanent put refresh: " << infoHash << " " << vid << std::endl;
+                                                Json::Value json;
+                                                json["timeout"] = infoHash.toString();
+                                                json["to"] = clientId;
+                                                json["vid"] = std::to_string(vid);
+                                                sendPushNotification(pushToken, json, isAndroid);
+                                            });
+                                    }
+#endif
+                                } else {
+                                    // Known put: just extend the deadlines.
+                                    scheduler_.edit(pput.expireJob, timeout);
+                                    if (pput.expireNotifyJob)
+                                        scheduler_.edit(pput.expireNotifyJob, timeout - proxy::OP_MARGIN);
+                                }
+                                lock.unlock();
+                                schedulerCv_.notify_one();
+                            }
+
+                            // Reply only once the DHT-side put completes.
+                            dht_->put(infoHash, value, [s, value](bool ok) {
+                                if (ok) {
+                                    Json::StreamWriterBuilder wbuilder;
+                                    wbuilder["commentStyle"] = "None";
+                                    wbuilder["indentation"] = "";
+                                    if (s->is_open())
+                                        s->close(restbed::OK, Json::writeString(wbuilder, value->toJson()) + "\n");
+                                } else {
+                                    if (s->is_open())
+                                        s->close(restbed::BAD_GATEWAY, "{\"err\":\"put failed\"}");
+                                }
+                            }, time_point::max(), permanent);
+                        } else {
+                            s->close(restbed::BAD_REQUEST, "{\"err\":\"Incorrect JSON\"}");
+                        }
+                    }
+                } else {
+                    s->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+                }
+            } catch (const std::exception& e) {
+                std::cout << "Error performing put: " << e.what() << std::endl;
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+#if OPENDHT_PROXY_SERVER_IDENTITY
+void
+DhtProxyServer::putSigned(const std::shared_ptr<restbed::Session>& session) const
+{
+    // SIGN "/{hash}": sign the JSON-described value with the proxy's identity
+    // and put it, echoing the value back to the client.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    InfoHash infoHash(hash);
+    if (!infoHash)
+        infoHash = InfoHash::get(hash);
+
+    session->fetch(content_length,
+        [=](const std::shared_ptr<restbed::Session> s, const restbed::Bytes& b)
+        {
+            try {
+                if (not dht_) {
+                    s->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+                    return;
+                }
+                if (b.empty()) {
+                    s->close(restbed::BAD_REQUEST, "{\"err\":\"Missing parameters\"}");
+                    return;
+                }
+                std::string parseErr;
+                Json::Value body;
+                Json::CharReaderBuilder readerBuilder;
+                auto* data = reinterpret_cast<const char*>(b.data());
+                auto jsonReader = std::unique_ptr<Json::CharReader>(readerBuilder.newCharReader());
+                if (not jsonReader->parse(data, data + b.size(), &body, &parseErr)) {
+                    s->close(restbed::BAD_REQUEST, "{\"err\":\"Incorrect JSON\"}");
+                    return;
+                }
+                auto value = std::make_shared<Value>(body);
+                // Echo the (compact) value back as confirmation.
+                Json::StreamWriterBuilder writerBuilder;
+                writerBuilder["commentStyle"] = "None";
+                writerBuilder["indentation"] = "";
+                auto output = Json::writeString(writerBuilder, value->toJson()) + "\n";
+                dht_->putSigned(infoHash, value);
+                s->close(restbed::OK, output);
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+void
+DhtProxyServer::putEncrypted(const std::shared_ptr<restbed::Session>& session) const
+{
+    // ENCRYPT "/{hash}": encrypt the JSON-described value for the "to"
+    // identity and put it under the key.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    InfoHash key(hash);
+    if (!key)
+        key = InfoHash::get(hash);
+
+    session->fetch(content_length,
+        [=](const std::shared_ptr<restbed::Session> s, const restbed::Bytes& b)
+        {
+            try {
+                if (dht_) {
+                    if(b.empty()) {
+                        std::string response("{\"err\":\"Missing parameters\"}");
+                        s->close(restbed::BAD_REQUEST, response);
+                    } else {
+                        std::string err;
+                        Json::Value root;
+                        Json::CharReaderBuilder rbuilder;
+                        auto* char_data = reinterpret_cast<const char*>(b.data());
+                        auto reader = std::unique_ptr<Json::CharReader>(rbuilder.newCharReader());
+                        // Fix: validate the parse result before reading fields
+                        // of `root`; previously "to" was extracted even when
+                        // parsing had failed.
+                        if (!reader->parse(char_data, char_data + b.size(), &root, &err)) {
+                            s->close(restbed::BAD_REQUEST, "{\"err\":\"Incorrect JSON\"}");
+                        } else {
+                            InfoHash to(root["to"].asString());
+                            if (to) {
+                                auto value = std::make_shared<Value>(root);
+                                // Echo the (compact) value back as confirmation.
+                                Json::StreamWriterBuilder wbuilder;
+                                wbuilder["commentStyle"] = "None";
+                                wbuilder["indentation"] = "";
+                                auto output = Json::writeString(wbuilder, value->toJson()) + "\n";
+                                dht_->putEncrypted(key, to, value);
+                                s->close(restbed::OK, output);
+                            } else {
+                                s->close(restbed::BAD_REQUEST, "{\"err\":\"No destination found\"}");
+                            }
+                        }
+                    }
+                } else {
+                    s->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+                }
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+#endif // OPENDHT_PROXY_SERVER_IDENTITY
+
+void
+DhtProxyServer::handleOptionsMethod(const std::shared_ptr<restbed::Session>& session) const
+{
+    // OPTIONS: CORS pre-flight reply advertising the supported methods.
+    requestNum_++;
+#if OPENDHT_PROXY_SERVER_IDENTITY
+    const auto allowed = "OPTIONS, GET, POST, LISTEN, SIGN, ENCRYPT";
+#else
+    const auto allowed = "OPTIONS, GET, POST, LISTEN";
+#endif //OPENDHT_PROXY_SERVER_IDENTITY
+    // Let browsers cache the pre-flight result for one day (86400 s).
+    session->close(restbed::OK, {{"Access-Control-Allow-Methods", allowed},
+                                 {"Access-Control-Allow-Headers", "content-type"},
+                                 {"Access-Control-Max-Age", "86400"}});
+}
+
+void
+DhtProxyServer::getFiltered(const std::shared_ptr<restbed::Session>& session) const
+{
+    // GET "/{hash}/{value}": like get(), but passes the "value" path segment
+    // to the DHT query as a filter, streaming matches one JSON line each.
+    requestNum_++;
+    const auto request = session->get_request();
+    int content_length = std::stoi(request->get_header("Content-Length", "0"));
+    auto hash = request->get_path_parameter("hash");
+    auto value = request->get_path_parameter("value");
+    session->fetch(content_length,
+        [=](const std::shared_ptr<restbed::Session> s, const restbed::Bytes& /*b* */)
+        {
+            try {
+                if (dht_) {
+                    InfoHash infoHash(hash);
+                    if (!infoHash) {
+                        infoHash = InfoHash::get(hash);
+                    }
+                    // Send headers first; start the query once they are flushed.
+                    s->yield(restbed::OK, "", [=]( const std::shared_ptr< restbed::Session > s) {
+                        dht_->get(infoHash, [s](std::shared_ptr<Value> v) {
+                            // Send values as soon as we get them
+                            // NOTE(review): unlike get(), there is no
+                            // is_closed() guard here — confirm that yielding
+                            // on a closed session is harmless.
+                            Json::StreamWriterBuilder wbuilder;
+                            wbuilder["commentStyle"] = "None";
+                            wbuilder["indentation"] = "";
+                            auto output = Json::writeString(wbuilder, v->toJson()) + "\n";
+                            s->yield(output, [](const std::shared_ptr<restbed::Session> /*session*/){ });
+                            return true;
+                        }, [s](bool /*ok* */) {
+                            // Communication is finished
+                            s->close();
+                        }, {}, value);
+                    });
+                } else {
+                    s->close(restbed::SERVICE_UNAVAILABLE, "{\"err\":\"Incorrect DhtRunner\"}");
+                }
+            } catch (...) {
+                s->close(restbed::INTERNAL_SERVER_ERROR, "{\"err\":\"Internal server error\"}");
+            }
+        }
+    );
+}
+
+void
+DhtProxyServer::removeClosedListeners(bool testSession)
+{
+    // Reap listeners whose HTTP session has gone away — or all of them when
+    // testSession is false (shutdown) — cancelling the DHT-side listen.
+    std::lock_guard<std::mutex> lock(lockListener_);
+    for (auto it = currentListeners_.begin(); it != currentListeners_.end();) {
+        if (dht_ and (not testSession or it->session->is_closed())) {
+            dht_->cancelListen(it->hash, std::move(it->token));
+            // Remove listener if unused
+            it = currentListeners_.erase(it);
+        } else {
+            ++it;
+        }
+    }
+}
+
+}
+#endif //OPENDHT_PROXY_SERVER
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Authors: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "dhtrunner.h"
+#include "securedht.h"
+
+#if OPENDHT_PROXY_CLIENT
+#include "dht_proxy_client.h"
+#endif
+
+#ifndef _WIN32
+#include <unistd.h>
+#else
+#include <io.h>
+#endif
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#else
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#define close(x) closesocket(x)
+#define write(s, b, f) send(s, b, (int)strlen(b), 0)
+#endif
+
+namespace dht {
+
+// Out-of-line definition for the constexpr static member (required pre-C++17).
+constexpr std::chrono::seconds DhtRunner::BOOTSTRAP_PERIOD;
+// Upper bound on queued received packets; presumably enforced where `rcv` is
+// filled (not visible in this file section) — confirm in the socket thread.
+static constexpr size_t RX_QUEUE_MAX_SIZE = 1024 * 16;
+
+struct DhtRunner::Listener {
+    size_t tokenClassicDht;  // token of the listen registered on the plain DHT
+    size_t tokenProxyDht;    // token of the listen registered on the proxy client
+    ValueCallback gcb;       // user callback
+    InfoHash hash;           // listened key
+    Value::Filter f;         // value filter to apply
+    Where w;                 // query restriction to apply
+};
+
+DhtRunner::DhtRunner() : dht_()
+#if OPENDHT_PROXY_CLIENT
+, dht_via_proxy_()
+#endif //OPENDHT_PROXY_CLIENT
+{
+#ifdef _WIN32
+    // Winsock must be initialised once per process before any socket use.
+    WSADATA wsd;
+    if (WSAStartup(MAKEWORD(2,2), &wsd) != 0)
+        throw DhtException("Can't initialize Winsock2");
+#endif
+}
+
+DhtRunner::~DhtRunner()
+{
+    // Stop networking and worker threads before members are destroyed.
+    join();
+#ifdef _WIN32
+    WSACleanup();  // pairs with WSAStartup in the constructor
+#endif
+}
+
+void
+DhtRunner::run(in_port_t port, DhtRunner::Config config)
+{
+    // Convenience overload: listen on the given port, any address, v4 + v6.
+    SockAddr addr4;
+    addr4.setFamily(AF_INET);
+    addr4.setPort(port);
+    SockAddr addr6;
+    addr6.setFamily(AF_INET6);
+    addr6.setPort(port);
+    run(addr4, addr6, config);
+}
+
+void
+DhtRunner::run(const char* ip4, const char* ip6, const char* service, DhtRunner::Config config)
+{
+    // Resolve both endpoints and bind to the first result of each (if any).
+    const auto resolved4 = SockAddr::resolve(ip4, service);
+    const auto resolved6 = SockAddr::resolve(ip6, service);
+    run(resolved4.empty() ? SockAddr() : resolved4.front(),
+        resolved6.empty() ? SockAddr() : resolved6.front(), config);
+}
+
+void
+DhtRunner::run(const SockAddr& local4, const SockAddr& local6, DhtRunner::Config config)
+{
+    // Bind the sockets, build the secure DHT instance and, unless
+    // config.threaded is false, start the DHT loop thread.
+    if (running)
+        return;
+    startNetwork(local4, local6);
+
+    auto dht = std::unique_ptr<DhtInterface>(new Dht(s4, s6, SecureDht::getConfig(config.dht_config)));
+    dht_ = std::unique_ptr<SecureDht>(new SecureDht(std::move(dht), config.dht_config));
+
+#if OPENDHT_PROXY_CLIENT
+    config_ = config;  // kept so the proxy client can be (re)created later
+#endif
+    enableProxy(not config.proxy_server.empty());
+
+    running = true;
+    if (not config.threaded)
+        return;
+    dht_thread = std::thread([this, local4, local6]() {
+        while (running) {
+            std::unique_lock<std::mutex> lk(dht_mtx);
+            time_point wakeup;
+            try {
+                wakeup = loop_();
+            } catch (const dht::SocketException& e) {
+                // The socket died: rebind and keep the loop alive.
+                startNetwork(local4, local6);
+            }
+
+            // Wake-up predicate: stop requested, packets queued, or pending
+            // operations runnable in the current connectivity state.
+            auto hasJobToDo = [this]() {
+                if (not running)
+                    return true;
+                {
+                    std::lock_guard<std::mutex> lck(sock_mtx);
+                    if (not rcv.empty())
+                        return true;
+                }
+                {
+                    std::lock_guard<std::mutex> lck(storage_mtx);
+                    if (not pending_ops_prio.empty())
+                        return true;
+                    auto s = getStatus();
+                    if (not pending_ops.empty() and (s == NodeStatus::Connected or (s == NodeStatus::Disconnected and not bootstraping)))
+                        return true;
+                }
+                return false;
+            };
+            if (wakeup == time_point::max())
+                cv.wait(lk, hasJobToDo);
+            else
+                cv.wait_until(lk, wakeup, hasJobToDo);
+        }
+    });
+}
+
+void
+DhtRunner::shutdown(ShutdownCallback cb) {
+#if OPENDHT_PROXY_CLIENT
+    // NOTE(review): when a proxy client is active, cb may be invoked both
+    // here and by the queued operation below — confirm callers tolerate it.
+    if (dht_via_proxy_)
+        dht_via_proxy_->shutdown(cb);
+#endif
+    // Queue the shutdown as a priority operation on the DHT thread.
+    std::lock_guard<std::mutex> lck(storage_mtx);
+    pending_ops_prio.emplace([=](SecureDht& dht) mutable {
+        dht.shutdown(cb);
+    });
+    cv.notify_all();
+}
+
+void
+DhtRunner::join()
+{
+    // Stop the network, signal every worker thread, wait for them to finish,
+    // then drop queued operations and reset the DHT state.
+    stopNetwork();
+    running = false;
+    cv.notify_all();
+    bootstrap_cv.notify_all();
+    if (dht_thread.joinable())
+        dht_thread.join();
+    if (bootstrap_thread.joinable())
+        bootstrap_thread.join();
+    if (rcv_thread.joinable())
+        rcv_thread.join();
+
+    {
+        // Discard pending operations (their callbacks are simply dropped).
+        std::lock_guard<std::mutex> lck(storage_mtx);
+        pending_ops = decltype(pending_ops)();
+        pending_ops_prio = decltype(pending_ops_prio)();
+    }
+    {
+        std::lock_guard<std::mutex> lck(dht_mtx);
+        resetDht();
+        status4 = NodeStatus::Disconnected;
+        status6 = NodeStatus::Disconnected;
+    }
+}
+
+void
+DhtRunner::dumpTables() const
+{
+    // Dump the routing tables of the active DHT (plain or proxy) to the log.
+    std::lock_guard<std::mutex> lck(dht_mtx);
+    // Fix: guard against the node not running — activeDht() was dereferenced
+    // unconditionally here, while the sibling accessors null-check it.
+    if (auto dht = activeDht())
+        dht->dumpTables();
+}
+
+InfoHash
+DhtRunner::getId() const
+{
+    // Identity (key) hash of this node, or a zero hash when not running.
+    auto dht = activeDht();
+    return dht ? dht->getId() : InfoHash{};
+}
+
+InfoHash
+DhtRunner::getNodeId() const
+{
+    // Network-level node id, or a zero hash when not running.
+    auto dht = activeDht();
+    return dht ? dht->getNodeId() : InfoHash{};
+}
+
+
+std::pair<size_t, size_t>
+DhtRunner::getStoreSize() const {
+    // Current local-storage usage, or zeros when not running.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    return dht_ ? dht_->getStoreSize() : std::pair<size_t, size_t>{};
+}
+
+void
+DhtRunner::setStorageLimit(size_t limit) {
+    // Apply a new local-storage limit; requires a running node.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    if (not dht_)
+        throw std::runtime_error("dht is not running");
+    dht_->setStorageLimit(limit);
+}
+
+std::vector<NodeExport>
+DhtRunner::exportNodes() const {
+    // Snapshot of known nodes (for persistence), empty when not running.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    return dht_ ? dht_->exportNodes() : std::vector<NodeExport>{};
+}
+
+std::vector<ValuesExport>
+DhtRunner::exportValues() const {
+    // Snapshot of locally stored values, empty when not running.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    return dht_ ? dht_->exportValues() : std::vector<ValuesExport>{};
+}
+
+void
+DhtRunner::setLoggers(LogMethod error, LogMethod warn, LogMethod debug) {
+    // Install the log handlers on every DHT instance we own.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    if (dht_)
+        dht_->setLoggers(error, warn, debug);
+#if OPENDHT_PROXY_CLIENT
+    if (dht_via_proxy_)
+        dht_via_proxy_->setLoggers(error, warn, debug);
+#endif
+}
+
+void
+DhtRunner::setLogFilter(const InfoHash& f) {
+    // Restrict logging to a single hash on every DHT instance we own.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    // Fix: the previous code additionally called activeDht()->setLogFilter(f),
+    // which is always one of the two instances below — the filter was applied
+    // twice (and dereferenced null when the node was not running).
+    if (dht_)
+        dht_->setLogFilter(f);
+#if OPENDHT_PROXY_CLIENT
+    if (dht_via_proxy_)
+        dht_via_proxy_->setLogFilter(f);
+#endif
+}
+
+void
+DhtRunner::registerType(const ValueType& type) {
+    // Register a value type on the active DHT.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    // Guard against a null active DHT (node not running), matching the
+    // null checks used by the other accessors.
+    if (auto dht = activeDht())
+        dht->registerType(type);
+}
+
+void
+DhtRunner::importValues(const std::vector<ValuesExport>& values) {
+    // Restore previously exported values into local storage.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    // Fix: guard against a stopped node, matching exportValues().
+    if (dht_)
+        dht_->importValues(values);
+}
+
+unsigned
+DhtRunner::getNodesStats(sa_family_t af, unsigned *good_return, unsigned *dubious_return, unsigned *cached_return, unsigned *incoming_return) const
+{
+    // Legacy-style accessor: fill the optional out-parameters and return the
+    // total of good + dubious nodes for the address family.
+    std::lock_guard<std::mutex> lock(dht_mtx);
+    const auto stats = activeDht()->getNodesStats(af);
+    if (good_return != nullptr)
+        *good_return = stats.good_nodes;
+    if (dubious_return != nullptr)
+        *dubious_return = stats.dubious_nodes;
+    if (cached_return != nullptr)
+        *cached_return = stats.cached_nodes;
+    if (incoming_return != nullptr)
+        *incoming_return = stats.incoming_nodes;
+    return stats.good_nodes + stats.dubious_nodes;
+}
+
+NodeStats
+DhtRunner::getNodesStats(sa_family_t af) const
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ return activeDht()->getNodesStats(af);
+}
+
+NodeInfo
+DhtRunner::getNodeInfo() const {
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ NodeInfo info;
+ info.id = getId();
+ info.node_id = getNodeId();
+ info.ipv4 = dht_->getNodesStats(AF_INET);
+ info.ipv6 = dht_->getNodesStats(AF_INET6);
+ return info;
+}
+
+std::vector<unsigned>
+DhtRunner::getNodeMessageStats(bool in) const
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ return activeDht()->getNodeMessageStats(in);
+}
+
+std::string
+DhtRunner::getStorageLog() const
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ return activeDht()->getStorageLog();
+}
+std::string
+DhtRunner::getStorageLog(const InfoHash& f) const
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ return activeDht()->getStorageLog(f);
+}
+std::string
+DhtRunner::getRoutingTablesLog(sa_family_t af) const
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ return activeDht()->getRoutingTablesLog(af);
+}
+std::string
+DhtRunner::getSearchesLog(sa_family_t af) const
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ return activeDht()->getSearchesLog(af);
+}
+std::string
+DhtRunner::getSearchLog(const InfoHash& f, sa_family_t af) const
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ return activeDht()->getSearchLog(f, af);
+}
+std::vector<SockAddr>
+DhtRunner::getPublicAddress(sa_family_t af)
+{
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ if (auto dht = activeDht())
+ return dht->getPublicAddress(af);
+ return {};
+}
+std::vector<std::string>
+DhtRunner::getPublicAddressStr(sa_family_t af)
+{
+ auto addrs = getPublicAddress(af);
+ std::vector<std::string> ret(addrs.size());
+ std::transform(addrs.begin(), addrs.end(), ret.begin(), [](const SockAddr& a) { return a.toString(); });
+ return ret;
+}
+
+void
+DhtRunner::registerCertificate(std::shared_ptr<crypto::Certificate> cert) {
+ std::lock_guard<std::mutex> lck(dht_mtx);
+ activeDht()->registerCertificate(cert);
+}
+void
+DhtRunner::setLocalCertificateStore(CertificateStoreQuery&& query_method) {
+ std::lock_guard<std::mutex> lck(dht_mtx);
+#if OPENDHT_PROXY_CLIENT
+ if (dht_via_proxy_)
+ dht_via_proxy_->setLocalCertificateStore(std::forward<CertificateStoreQuery>(query_method));
+#endif
+ if (dht_)
+ dht_->setLocalCertificateStore(std::forward<CertificateStoreQuery>(query_method));
+}
+
time_point
DhtRunner::loop_()
{
    // One iteration of the DHT main loop: run queued user operations, feed
    // received packets to the DHT, and react to connectivity changes.
    // Returns the time point at which the loop should run again.
    auto dht = activeDht();
    if (not dht)
        return {};

    // Drain one of the two op queues. Priority ops always run; regular ops
    // are deferred while connecting (or while a bootstrap attempt is alive).
    decltype(pending_ops) ops {};
    {
        std::lock_guard<std::mutex> lck(storage_mtx);
        auto s = getStatus();
        ops = (pending_ops_prio.empty() && (s == NodeStatus::Connected or (s == NodeStatus::Disconnected and not bootstraping))) ?
               std::move(pending_ops) : std::move(pending_ops_prio);
    }
    while (not ops.empty()) {
        ops.front()(*dht);
        ops.pop();
    }

    time_point wakeup {};
    decltype(rcv) received {};
    {
        std::lock_guard<std::mutex> lck(sock_mtx);
        // move to stack so packets are processed without holding the lock
        received = std::move(rcv);
    }
    if (not received.empty()) {
        while (not received.empty()) {
            auto& pck = received.front();
            auto delay = clock::now() - pck.received;
            // Discard packets that sat in the queue too long; answering them
            // would only trigger useless retransmissions.
            if (delay > std::chrono::milliseconds(500))
                std::cerr << "Dropping packet with high delay: " << print_dt(delay) << std::endl;
            else
                // NOTE(review): size()-1 pairs with the receive thread storing
                // rc+1 bytes per packet (see startNetwork) — presumably to
                // reserve a terminator byte for the parser; confirm before
                // changing either side.
                wakeup = dht->periodic(pck.data.data(), pck.data.size()-1, pck.from);
            received.pop();
        }
    } else {
        // No input: let the DHT perform its periodic maintenance only.
        wakeup = dht->periodic(nullptr, 0, nullptr, 0);
    }

    // Detect per-family status transitions and (re)start or stop the
    // continuous bootstrap process accordingly.
    NodeStatus nstatus4 = dht->getStatus(AF_INET);
    NodeStatus nstatus6 = dht->getStatus(AF_INET6);
    if (nstatus4 != status4 || nstatus6 != status6) {
        status4 = nstatus4;
        status6 = nstatus6;
        if (status4 == NodeStatus::Disconnected and status6 == NodeStatus::Disconnected) {
            // We have lost connection with the DHT: retry all known bootstrap nodes.
            std::unique_lock<std::mutex> lck(bootstrap_mtx);
            bootstrap_nodes = bootstrap_nodes_all;
            tryBootstrapContinuously();
        } else {
            std::unique_lock<std::mutex> lck(bootstrap_mtx);
            bootstrap_nodes.clear();
        }
        if (statusCb)
            statusCb(status4, status6);
    }

    return wakeup;
}
+
+
+int bindSocket(const SockAddr& addr, SockAddr& bound)
+{
+ bool is_ipv6 = addr.getFamily() == AF_INET6;
+ int sock = socket(is_ipv6 ? PF_INET6 : PF_INET, SOCK_DGRAM, 0);
+ if (sock < 0)
+ throw DhtException(std::string("Can't open socket: ") + strerror(sock));
+ int set = 1;
+#ifdef SO_NOSIGPIPE
+ setsockopt(sock, SOL_SOCKET, SO_NOSIGPIPE, (const char*)&set, sizeof(set));
+#endif
+ if (is_ipv6)
+ setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&set, sizeof(set));
+ int rc = bind(sock, addr.get(), addr.getLength());
+ if(rc < 0) {
+ rc = errno;
+ close(sock);
+ throw DhtException("Can't bind socket on " + addr.toString() + " " + strerror(rc));
+ }
+ sockaddr_storage ss;
+ socklen_t ss_len = sizeof(ss);
+ getsockname(sock, (sockaddr*)&ss, &ss_len);
+ bound = {ss, ss_len};
+ return sock;
+}
+
+#ifdef _WIN32
+inline void udpPipe(int fds[2])
+{
+ int lst = socket(AF_INET, SOCK_DGRAM, 0);
+ if (lst < 0)
+ throw DhtException(std::string("Can't open socket: ") + strerror(lst));
+ sockaddr_in inaddr;
+ sockaddr addr;
+ memset(&inaddr, 0, sizeof(inaddr));
+ memset(&addr, 0, sizeof(addr));
+ inaddr.sin_family = AF_INET;
+ inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ inaddr.sin_port = 0;
+ int yes=1;
+ setsockopt(lst, SOL_SOCKET, SO_REUSEADDR, (char*)&yes, sizeof(yes));
+ int rc = bind(lst, (sockaddr*)&inaddr, sizeof(inaddr));
+ if (rc < 0) {
+ close(lst);
+ throw DhtException("Can't bind socket on " + print_addr((sockaddr*)&addr, sizeof(inaddr)) + " " + strerror(rc));
+ }
+ socklen_t len = sizeof(inaddr);
+ getsockname(lst, &addr, &len);
+ fds[0] = lst;
+ fds[1] = socket(AF_INET, SOCK_DGRAM, 0);
+ connect(fds[1], &addr, len);
+}
+#endif
+
+void
+DhtRunner::stopNetwork()
+{
+ running_network = false;
+ if (stop_writefd != -1) {
+ if (write(stop_writefd, "\0", 1) == -1) {
+ perror("write");
+ }
+ }
+}
+
+void
+DhtRunner::startNetwork(const SockAddr sin4, const SockAddr sin6)
+{
+ stopNetwork();
+ if (rcv_thread.joinable())
+ rcv_thread.join();
+
+ int stopfds[2];
+#ifndef _WIN32
+ auto status = pipe(stopfds);
+ if (status == -1) {
+ throw DhtException("Can't open pipe");
+ }
+#else
+ udpPipe(stopfds);
+#endif
+ int stop_readfd = stopfds[0];
+ stop_writefd = stopfds[1];
+
+ s4 = -1;
+ s6 = -1;
+
+ bound4 = {};
+ if (sin4)
+ s4 = bindSocket(sin4, bound4);
+
+#if 1
+ bound6 = {};
+ if (sin6)
+ s6 = bindSocket(sin6, bound6);
+#endif
+
+ running_network = true;
+ rcv_thread = std::thread([this, stop_readfd]() {
+ try {
+ while (running_network) {
+ fd_set readfds;
+
+ FD_ZERO(&readfds);
+ FD_SET(stop_readfd, &readfds);
+ if(s4 >= 0)
+ FD_SET(s4, &readfds);
+ if(s6 >= 0)
+ FD_SET(s6, &readfds);
+
+ int rc = select(s4 > s6 ? s4 + 1 : s6 + 1, &readfds, nullptr, nullptr, nullptr);
+ if(rc < 0) {
+ if(errno != EINTR) {
+ perror("select");
+ std::this_thread::sleep_for( std::chrono::seconds(1) );
+ }
+ }
+
+ if (not running_network)
+ break;
+
+ if(rc > 0) {
+ std::array<uint8_t, 1024 * 64> buf;
+ sockaddr_storage from;
+ socklen_t from_len = sizeof(from);
+
+ if(s4 >= 0 && FD_ISSET(s4, &readfds))
+ rc = recvfrom(s4, (char*)buf.data(), buf.size(), 0, (sockaddr*)&from, &from_len);
+ else if(s6 >= 0 && FD_ISSET(s6, &readfds))
+ rc = recvfrom(s6, (char*)buf.data(), buf.size(), 0, (sockaddr*)&from, &from_len);
+ else
+ continue;
+ if (rc > 0) {
+ {
+ std::lock_guard<std::mutex> lck(sock_mtx);
+ if (rcv.size() >= RX_QUEUE_MAX_SIZE) {
+ std::cerr << "Dropping packet: queue is full!" << std::endl;
+ rcv.pop();
+ }
+ rcv.emplace(ReceivedPacket {Blob {buf.begin(), buf.begin()+rc+1}, SockAddr(from, from_len), clock::now()});
+ }
+ cv.notify_all();
+ }
+ }
+ }
+ } catch (const std::exception& e) {
+ std::cerr << "Error in DHT networking thread: " << e.what() << std::endl;
+ }
+ if (s4 >= 0)
+ close(s4);
+ if (s6 >= 0)
+ close(s6);
+ s4 = -1;
+ s6 = -1;
+ bound4 = {};
+ bound6 = {};
+ if (stop_readfd != -1)
+ close(stop_readfd);
+ if (stop_writefd != -1)
+ close(stop_writefd);
+ stop_writefd = -1;
+ });
+}
+
+void
+DhtRunner::get(InfoHash hash, GetCallback vcb, DoneCallback dcb, Value::Filter f, Where w)
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops.emplace([=](SecureDht& dht) mutable {
+ dht.get(hash, vcb, dcb, std::move(f), std::move(w));
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::get(const std::string& key, GetCallback vcb, DoneCallbackSimple dcb, Value::Filter f, Where w)
+{
+ get(InfoHash::get(key), vcb, dcb, f, w);
+}
+void
+DhtRunner::query(const InfoHash& hash, QueryCallback cb, DoneCallback done_cb, Query q) {
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops.emplace([=](SecureDht& dht) mutable {
+ dht.query(hash, cb, done_cb, std::move(q));
+ });
+ }
+ cv.notify_all();
+}
+
std::future<size_t>
DhtRunner::listen(InfoHash hash, ValueCallback vcb, Value::Filter f, Where w)
{
    // Register a persistent listener on `hash`. The returned future resolves
    // (on the DHT loop) to a token usable with cancelListen().
    auto ret_token = std::make_shared<std::promise<size_t>>();
    {
        std::lock_guard<std::mutex> lck(storage_mtx);
        pending_ops.emplace([=](SecureDht& dht) mutable {
#if OPENDHT_PROXY_CLIENT
            // With proxy support, a runner-level token maps to a Listener
            // record carrying the per-backend tokens, so the listen can be
            // re-attached when switching between proxy and classic node
            // (see enableProxy()).
            auto tokenbGlobal = listener_token_++;
            Listener listener {};
            listener.hash = hash;
            listener.f = std::move(f);
            listener.w = std::move(w);
            // Wrapper callback: auto-cancel when the user callback returns false.
            listener.gcb = [hash,vcb,tokenbGlobal,this](const std::vector<Sp<Value>>& vals, bool expired){
                if (not vcb(vals, expired)) {
                    cancelListen(hash, tokenbGlobal);
                    return false;
                }
                return true;
            };
            if (use_proxy)
                listener.tokenProxyDht = dht.listen(hash, listener.gcb, listener.f, listener.w);
            else
                listener.tokenClassicDht = dht.listen(hash, listener.gcb, listener.f, listener.w);
            listeners_.emplace(tokenbGlobal, std::move(listener));
            ret_token->set_value(tokenbGlobal);
#else
            // Without proxy support, the backend token is returned directly.
            ret_token->set_value(dht.listen(hash, vcb, f, w));
#endif
        });
    }
    cv.notify_all();
    return ret_token->get_future();
}
+
+std::future<size_t>
+DhtRunner::listen(const std::string& key, GetCallback vcb, Value::Filter f, Where w)
+{
+ return listen(InfoHash::get(key), vcb, f, w);
+}
+
void
DhtRunner::cancelListen(InfoHash h, size_t token)
{
    // Cancel a listener previously returned by listen(), by token.
    {
        std::lock_guard<std::mutex> lck(storage_mtx);
#if OPENDHT_PROXY_CLIENT
        // Proxy build: the token indexes the runner-level listener map;
        // cancel whichever backend token(s) the record holds.
        // Note: the lambda reaches for dht_/dht_via_proxy_ directly, not the
        // SecureDht argument, since both backends may hold a registration.
        pending_ops.emplace([=](SecureDht&) {
            auto it = listeners_.find(token);
            if (it == listeners_.end()) return;
            if (it->second.tokenClassicDht)
                dht_->cancelListen(h, it->second.tokenClassicDht);
            if (it->second.tokenProxyDht and dht_via_proxy_)
                dht_via_proxy_->cancelListen(h, it->second.tokenProxyDht);
            listeners_.erase(it);
        });
#else
        // Non-proxy build: the token is the backend token itself.
        pending_ops.emplace([=](SecureDht& dht) {
            dht.cancelListen(h, token);
        });
#endif // OPENDHT_PROXY_CLIENT
    }
    cv.notify_all();
}
+
void
DhtRunner::cancelListen(InfoHash h, std::shared_future<size_t> ftoken)
{
    // Cancel a listener using the future returned by listen() directly.
    // NOTE(review): ftoken.get() below runs on the DHT loop; it resolves
    // because the matching listen() op was queued earlier on the same loop,
    // but this would deadlock if a caller passed a future produced elsewhere
    // that only the loop itself could satisfy — confirm callers.
    {
        std::lock_guard<std::mutex> lck(storage_mtx);
#if OPENDHT_PROXY_CLIENT
        pending_ops.emplace([=](SecureDht&) {
            auto it = listeners_.find(ftoken.get());
            if (it == listeners_.end()) return;
            if (it->second.tokenClassicDht)
                dht_->cancelListen(h, it->second.tokenClassicDht);
            if (it->second.tokenProxyDht and dht_via_proxy_)
                dht_via_proxy_->cancelListen(h, it->second.tokenProxyDht);
            listeners_.erase(it);
        });
#else
        pending_ops.emplace([=](SecureDht& dht) {
            dht.cancelListen(h, ftoken.get());
        });
#endif // OPENDHT_PROXY_CLIENT
    }
    cv.notify_all();
}
+
+void
+DhtRunner::put(InfoHash hash, Value&& value, DoneCallback cb, time_point created, bool permanent)
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ auto sv = std::make_shared<Value>(std::move(value));
+ pending_ops.emplace([=](SecureDht& dht) {
+ dht.put(hash, sv, cb, created, permanent);
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::put(InfoHash hash, std::shared_ptr<Value> value, DoneCallback cb, time_point created, bool permanent)
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops.emplace([=](SecureDht& dht) {
+ dht.put(hash, value, cb, created, permanent);
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::put(const std::string& key, Value&& value, DoneCallbackSimple cb, time_point created, bool permanent)
+{
+ put(InfoHash::get(key), std::forward<Value>(value), cb, created, permanent);
+}
+
+void
+DhtRunner::cancelPut(const InfoHash& h , const Value::Id& id)
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops.emplace([=](SecureDht& dht) {
+ dht.cancelPut(h, id);
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::putSigned(InfoHash hash, std::shared_ptr<Value> value, DoneCallback cb)
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops.emplace([=](SecureDht& dht) {
+ dht.putSigned(hash, value, cb);
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::putSigned(InfoHash hash, Value&& value, DoneCallback cb)
+{
+ putSigned(hash, std::make_shared<Value>(std::move(value)), cb);
+}
+
+void
+DhtRunner::putSigned(const std::string& key, Value&& value, DoneCallbackSimple cb)
+{
+ putSigned(InfoHash::get(key), std::forward<Value>(value), cb);
+}
+
+void
+DhtRunner::putEncrypted(InfoHash hash, InfoHash to, std::shared_ptr<Value> value, DoneCallback cb)
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops.emplace([=](SecureDht& dht) {
+ dht.putEncrypted(hash, to, value, cb);
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::putEncrypted(InfoHash hash, InfoHash to, Value&& value, DoneCallback cb)
+{
+ putEncrypted(hash, to, std::make_shared<Value>(std::move(value)), cb);
+}
+
+void
+DhtRunner::putEncrypted(const std::string& key, InfoHash to, Value&& value, DoneCallback cb)
+{
+ putEncrypted(InfoHash::get(key), to, std::forward<Value>(value), cb);
+}
+
void
DhtRunner::tryBootstrapContinuously()
{
    // Start (or keep running) a background thread that periodically pings
    // the known bootstrap nodes until the node reconnects or stops.
    if (bootstrap_thread.joinable()) {
        if (bootstraping)
            return; // already running
        else
            bootstrap_thread.join(); // previous attempt finished; reap it
    }
    bootstraping = true;
    bootstrap_thread = std::thread([this]() {
        auto next = clock::now();
        do {
            // Copy the target list so pings run without holding bootstrap_mtx.
            decltype(bootstrap_nodes) nodes;
            {
                std::lock_guard<std::mutex> lck(bootstrap_mtx);
                nodes = bootstrap_nodes;
            }

            next += BOOTSTRAP_PERIOD;
            {
                // Local mutex/cv pair tracks completion of this round's pings.
                std::mutex mtx;
                std::unique_lock<std::mutex> blck(mtx);
                unsigned ping_count(0);
                // Reverse: try last inserted bootstrap nodes first
                for (auto it = nodes.rbegin(); it != nodes.rend(); it++) {
                    ++ping_count;
                    try {
                        bootstrap(SockAddr::resolve(it->first, it->second), [&](bool) {
                            if (not running)
                                return;
                            {
                                std::unique_lock<std::mutex> blck(mtx);
                                --ping_count;
                            }
                            bootstrap_cv.notify_all();
                        });
                    } catch (std::invalid_argument& e) {
                        // Resolution failed: this node does not count as pending.
                        --ping_count;
                        std::cerr << e.what() << std::endl;
                    }
                }
                // wait at least until the next BOOTSTRAP_PERIOD
                bootstrap_cv.wait_until(blck, next, [&]() { return not running; });
                // wait for bootstrap requests to end.
                if (running)
                    bootstrap_cv.wait(blck, [&]() { return not running or ping_count == 0; });
            }
            // update state: keep looping only while still disconnected and running
            {
                std::lock_guard<std::mutex> lck(dht_mtx);
                bootstraping = running and
                               status4 == NodeStatus::Disconnected and
                               status6 == NodeStatus::Disconnected;
            }
        } while (bootstraping);
    });
}
+
+void
+DhtRunner::bootstrap(const std::string& host, const std::string& service)
+{
+ std::lock_guard<std::mutex> lck(bootstrap_mtx);
+ bootstrap_nodes_all.emplace_back(host, service);
+ bootstrap_nodes.emplace_back(host, service);
+ tryBootstrapContinuously();
+}
+
+void
+DhtRunner::clearBootstrap()
+{
+ std::lock_guard<std::mutex> lck(bootstrap_mtx);
+ bootstrap_nodes_all.clear();
+}
+
+void
+DhtRunner::bootstrap(const std::vector<SockAddr>& nodes, DoneCallbackSimple&& cb)
+{
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops_prio.emplace([=](SecureDht& dht) mutable {
+ auto rem = cb ? std::make_shared<std::pair<size_t, bool>>(nodes.size(), false) : nullptr;
+ for (const auto& node : nodes)
+ dht.pingNode(node.get(), node.getLength(), cb ? [rem,cb](bool ok) {
+ auto& r = *rem;
+ r.first--;
+ r.second |= ok;
+ if (not r.first)
+ cb(r.second);
+ } : DoneCallbackSimple{});
+ });
+ cv.notify_all();
+}
+
+void
+DhtRunner::bootstrap(const SockAddr& addr, DoneCallbackSimple&& cb)
+{
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops_prio.emplace([addr,cb](SecureDht& dht) mutable {
+ dht.pingNode(addr.get(), addr.getLength(), std::move(cb));
+ });
+ cv.notify_all();
+}
+
+void
+DhtRunner::bootstrap(const std::vector<NodeExport>& nodes)
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops_prio.emplace([=](SecureDht& dht) {
+ for (auto& node : nodes)
+ dht.insertNode(node);
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::connectivityChanged()
+{
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops_prio.emplace([=](SecureDht& dht) {
+ dht.connectivityChanged();
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::findCertificate(InfoHash hash, std::function<void(const std::shared_ptr<crypto::Certificate>)> cb) {
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops.emplace([=](SecureDht& dht) {
+ dht.findCertificate(hash, cb);
+ });
+ }
+ cv.notify_all();
+}
+
+void
+DhtRunner::resetDht()
+{
+#if OPENDHT_PROXY_CLIENT
+ listeners_.clear();
+ dht_via_proxy_.reset();
+#endif // OPENDHT_PROXY_CLIENT
+ dht_.reset();
+}
+
+SecureDht*
+DhtRunner::activeDht() const
+{
+#if OPENDHT_PROXY_CLIENT
+ return use_proxy? dht_via_proxy_.get() : dht_.get();
+#else
+ return dht_.get();
+#endif // OPENDHT_PROXY_CLIENT
+}
+
+void
+DhtRunner::setProxyServer(const std::string& proxy, const std::string& pushNodeId)
+{
+#if OPENDHT_PROXY_CLIENT
+ if (config_.proxy_server == proxy and config_.push_node_id == pushNodeId)
+ return;
+ config_.proxy_server = proxy;
+ config_.push_node_id = pushNodeId;
+ enableProxy(use_proxy and not config_.proxy_server.empty());
+#else
+ if (not proxy.empty())
+ std::cerr << "DHT proxy requested but OpenDHT built without proxy support." << std::endl;
+#endif
+}
+
void
DhtRunner::enableProxy(bool proxify)
{
    // Switch between the proxy client and the classic node, migrating active
    // listeners to whichever backend becomes current.
#if OPENDHT_PROXY_CLIENT
    if (dht_via_proxy_) {
        // Gracefully stop any previous proxy client before replacing it.
        dht_via_proxy_->shutdown({});
    }
    if (proxify) {
        // Init the proxy client; its loop-wakeup callback queues a no-op so
        // the runner thread re-evaluates state.
        auto dht_via_proxy = std::unique_ptr<DhtInterface>(
            new DhtProxyClient([this]{
                if (config_.threaded) {
                    {
                        std::lock_guard<std::mutex> lck(storage_mtx);
                        pending_ops_prio.emplace([=](SecureDht&) mutable {});
                    }
                    cv.notify_all();
                }
            }, config_.proxy_server, config_.push_node_id)
        );
        dht_via_proxy_ = std::unique_ptr<SecureDht>(new SecureDht(std::move(dht_via_proxy), config_.dht_config));
#if OPENDHT_PUSH_NOTIFICATIONS
        if (not pushToken_.empty())
            dht_via_proxy_->setPushNotificationToken(pushToken_);
#endif
        // Re-attach current listeners on the proxy backend.
        for (auto& l: listeners_)
            l.second.tokenProxyDht = dht_via_proxy_->listen(l.second.hash, l.second.gcb, l.second.f, l.second.w);
        // and use it
        use_proxy = proxify;
    } else {
        use_proxy = proxify;
        std::lock_guard<std::mutex> lck(storage_mtx);
        // Re-attach listeners on the classic node (from the DHT loop) for any
        // listener that was proxy-only so far.
        if (not listeners_.empty()) {
            pending_ops.emplace([this](SecureDht& /*dht*/) mutable {
                if (not dht_)
                    return;
                for (auto& l : listeners_) {
                    if (not l.second.tokenClassicDht) {
                        l.second.tokenClassicDht = dht_->listen(l.second.hash, l.second.gcb, l.second.f, l.second.w);
                    }
                }
            });
        }
    }
#else
    if (proxify)
        std::cerr << "DHT proxy requested but OpenDHT built without proxy support." << std::endl;
#endif
}
+
+void
+DhtRunner::forwardAllMessages(bool forward)
+{
+#if OPENDHT_PROXY_SERVER
+#if OPENDHT_PROXY_CLIENT
+ if (dht_via_proxy_)
+ dht_via_proxy_->forwardAllMessages(forward);
+#endif // OPENDHT_PROXY_CLIENT
+ if (dht_)
+ dht_->forwardAllMessages(forward);
+#endif // OPENDHT_PROXY_SERVER
+}
+
+/**
+ * Updates the push notification device token
+ */
+void
+DhtRunner::setPushNotificationToken(const std::string& token) {
+#if OPENDHT_PROXY_CLIENT && OPENDHT_PUSH_NOTIFICATIONS
+ pushToken_ = token;
+ if (dht_via_proxy_)
+ dht_via_proxy_->setPushNotificationToken(token);
+#endif
+}
+
+void
+DhtRunner::pushNotificationReceived(const std::map<std::string, std::string>& data)
+{
+#if OPENDHT_PROXY_CLIENT && OPENDHT_PUSH_NOTIFICATIONS
+ {
+ std::lock_guard<std::mutex> lck(storage_mtx);
+ pending_ops_prio.emplace([=](SecureDht&) {
+ if (dht_via_proxy_)
+ dht_via_proxy_->pushNotificationReceived(data);
+ });
+ }
+ cv.notify_all();
+#endif
+}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Nicolas Reynaud <nicolas.reynaud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "indexation/pht.h"
+#include "rng.h"
+
+namespace dht {
+namespace indexation {
+
+/**
+ * Output the blob into string and readable way
+ *
+ * @param bl : Blob to print
+ *
+ * @return string that represent the blob into a readable way
+ */
+static std::string blobToString(const Blob &bl) {
+ std::stringstream ss;
+ auto bn = bl.size() % 8;
+ auto n = bl.size() / 8;
+
+ for (size_t i = 0; i < bl.size(); i++)
+ ss << std::bitset<8>(bl[i]) << " ";
+ if (bn)
+ for (unsigned b=0; b < bn; b++)
+ ss << (char)((bl[n] & (1 << (7 - b))) ? '1':'0');
+
+ return ss.str();
+}
+
+std::string Prefix::toString() const {
+ std::stringstream ss;
+
+ ss << "Prefix : " << std::endl << "\tContent_ : \"";
+ ss << blobToString(content_);
+ ss << "\"" << std::endl;
+
+ ss << "\tFlags_ : \"";
+ ss << blobToString(flags_);
+ ss << "\"" << std::endl;
+
+ return ss.str();
+}
+
void Pht::Cache::insert(const Prefix& p) {
    // Record a known-leaf prefix in the cache trie: walk/create one node per
    // content bit of `p`, refreshing timestamps, and register the final node
    // as a leaf. Interior nodes are held only through weak_ptr links, so they
    // survive exactly as long as some live leaf below them.
    size_t i = 0;
    auto now = clock::now();

    std::shared_ptr<Node> curr_node;

    // Evict leaves that are expired, or trim while over capacity.
    while ((leaves_.size() > 0
        and leaves_.begin()->first + NODE_EXPIRE_TIME < now)
        or leaves_.size() > MAX_ELEMENT) {

        leaves_.erase(leaves_.begin());
    }

    if (not (curr_node = root_.lock()) ) {
        /* Root does not exist, need to create one */
        curr_node = std::make_shared<Node>();
        root_ = curr_node;
    }

    curr_node->last_reply = now;

    /* Iterate through all bits of the Blob */
    for ( i = 0; i < p.size_; i++ ) {

        /* According to the bit, pick the next child (1 = right, 0 = left) */
        auto& next = ( p.isContentBitActive(i) ) ? curr_node->right_child : curr_node->left_child;

        /* If the weak link is still alive the node exists; otherwise create it. */
        if (auto n = next.lock()) {
            curr_node = std::move(n);
        } else {
            /* Create the next node if it doesn't exist; parent's shared_ptr
               keeps the chain alive while the leaf below is referenced. */
            auto tmp_curr_node = std::make_shared<Node>();
            tmp_curr_node->parent = curr_node;
            next = tmp_curr_node;
            curr_node = std::move(tmp_curr_node);
        }

        curr_node->last_reply = now;
    }

    /* Insert the leaf (curr_node) into the time-ordered multimap */
    leaves_.emplace(std::move(now), std::move(curr_node) );
}
+
int Pht::Cache::lookup(const Prefix& p) {
    // Walk the cache trie along the content bits of `p` and return the depth
    // of the deepest cached node reached (-1 when the cache is empty), which
    // callers use as a starting guess for the PHT binary search.
    int pos = -1;
    auto now = clock::now(), last_node_time = now;

    /* Before lookup remove the useless ones [i.e. too old] */
    while ( leaves_.size() > 0
        and leaves_.begin()->first + NODE_EXPIRE_TIME < now ) {

        leaves_.erase(leaves_.begin());
    }

    auto next = root_;
    std::shared_ptr<Node> curr_node;

    while ( auto n = next.lock() ) {
        ++pos;
        /* Safe since pos is equal to 0 until here */
        if ( (unsigned) pos >= p.content_.size() * 8) break;

        curr_node = n;
        last_node_time = curr_node->last_reply;
        curr_node->last_reply = now;

        /* Get the Prefix bit by bit, starting from left */
        next = ( p.isContentBitActive(pos) ) ? curr_node->right_child : curr_node->left_child;
    }

    if ( pos >= 0 ) {
        // Refresh the reached node's leaf entry: re-key it under `now`
        // so it is not evicted as stale.
        auto to_erase = leaves_.find(last_node_time);
        if ( to_erase != leaves_.end() )
            leaves_.erase( to_erase );

        leaves_.emplace( std::move(now), std::move(curr_node) );
    }

    return pos;
}
+
// Out-of-line definitions for static class constants (required for ODR-use).
const ValueType IndexEntry::TYPE = ValueType::USER_DATA;
constexpr std::chrono::minutes Pht::Cache::NODE_EXPIRE_TIME;
+
void Pht::lookupStep(Prefix p, std::shared_ptr<int> lo, std::shared_ptr<int> hi,
        std::shared_ptr<std::vector<std::shared_ptr<IndexEntry>>> vals,
        LookupCallbackWrapper cb, DoneCallbackSimple done_cb,
        std::shared_ptr<unsigned> max_common_prefix_len,
        int start, bool all_values)
{
    // One step of the PHT leaf search: binary-search over prefix lengths in
    // [*lo, *hi], probing the DHT at prefix length `mid` and `mid`+1. A node
    // is part of the trie ("is_pht") iff a canary value is found at its hash;
    // a trie node whose child is not a trie node is the leaf we want.
    // `start` >= 0 seeds `mid` from the cache; -1 means use (*lo + *hi) / 2.
    // When `max_common_prefix_len` is set the lookup is inexact and collects
    // the entries sharing the longest common prefix with `p`.
    struct node_lookup_result {
        bool done {false};   // the corresponding get() has completed
        bool is_pht {false}; // a canary was seen => the node is in the trie
    };

    /* start could be under 0 but after comparing it to 0 it always will be unsigned, so we can cast it */
    auto mid = (start >= 0) ? (unsigned) start : (*lo + *hi)/2;

    auto first_res = std::make_shared<node_lookup_result>();
    auto second_res = std::make_shared<node_lookup_result>();

    // Invoked once both probes have resolved: either recurse (narrowing the
    // range) or report the leaf through cb/done_cb.
    auto on_done = [=](bool ok) {
        bool is_leaf = first_res->is_pht and not second_res->is_pht;
        if (not ok) {
            if (done_cb)
                done_cb(false);
        }
        else if (is_leaf or *lo > *hi) {
            // leaf node found: remember it in the cache
            Prefix to_insert = p.getPrefix(mid);
            cache_.insert(to_insert);

            if (cb) {
                // Inexact lookup with no match so far: also explore the
                // sibling subtree before reporting.
                if (vals->size() == 0 and max_common_prefix_len and mid > 0) {
                    auto p_ = (p.getPrefix(mid)).getSibling().getFullSize();
                    *lo = mid;
                    *hi = p_.size_;
                    lookupStep(p_, lo, hi, vals, cb, done_cb, max_common_prefix_len, -1, all_values);
                }

                cb(*vals, to_insert);
            }

            if (done_cb)
                done_cb(true);
        } else if (first_res->is_pht) {
            // internal node: the leaf is deeper, search the upper half
            *lo = mid+1;
            lookupStep(p, lo, hi, vals, cb, done_cb, max_common_prefix_len, -1, all_values);
        } else {
            // first get failed before second.
            if (done_cb)
                done_cb(false);
        }
    };

    if (*lo <= *hi) {
        // Only consider values tagged with this index's name.
        auto pht_filter = [&](const dht::Value& v) {
            return v.user_type.compare(0, name_.size(), name_) == 0;
        };

        // Shared per-value handler for both probes: flag canaries, and
        // collect (deduplicated) index entries according to the match mode.
        auto on_get = [=](const std::shared_ptr<dht::Value>& value, std::shared_ptr<node_lookup_result> res) {
            if (value->user_type == canary_) {
                res->is_pht = true;
            }
            else {
                IndexEntry entry;
                entry.unpackValue(*value);

                auto it = std::find_if(vals->cbegin(), vals->cend(), [&](const std::shared_ptr<IndexEntry>& ie) {
                    return ie->value == entry.value;
                });

                /* If we already got the value then get the next one */
                if (it != vals->cend())
                    return true;

                if (max_common_prefix_len) { /* inexact match case */
                    auto common_bits = Prefix::commonBits(p, entry.prefix);

                    if (vals->empty()) {
                        vals->emplace_back(std::make_shared<IndexEntry>(entry));
                        *max_common_prefix_len = common_bits;
                    }
                    else {
                        if (common_bits == *max_common_prefix_len) /* this is the max so far */
                            vals->emplace_back(std::make_shared<IndexEntry>(entry));
                        else if (common_bits > *max_common_prefix_len) { /* new max found! */
                            vals->clear();
                            vals->emplace_back(std::make_shared<IndexEntry>(entry));
                            *max_common_prefix_len = common_bits;
                        }
                    }
                } else if (all_values or entry.prefix == p.content_) /* exact match case */
                    vals->emplace_back(std::make_shared<IndexEntry>(entry));
            }

            return true;
        };

        // First probe: the node at prefix length `mid`.
        dht_->get(p.getPrefix(mid).hash(),
            std::bind(on_get, std::placeholders::_1, first_res),
            [=](bool ok) {
                if (not ok) {
                    // DHT failed
                    first_res->done = true;
                    if (done_cb and second_res->done)
                        on_done(false);
                }
                else {
                    if (not first_res->is_pht) {
                        // Not a PHT node: the leaf is shallower, search the lower half.
                        *hi = mid-1;
                        lookupStep(p, lo, hi, vals, cb, done_cb, max_common_prefix_len, -1, all_values);
                    } else {
                        first_res->done = true;
                        if (second_res->done or mid >= p.size_ - 1)
                            on_done(true);
                    }
                }
            }, pht_filter);

        // Second probe: the child at prefix length `mid`+1 (skipped at max depth).
        if (mid < p.size_ - 1)
            dht_->get(p.getPrefix(mid+1).hash(),
                std::bind(on_get, std::placeholders::_1, second_res),
                [=](bool ok) {
                    if (not ok) {
                        // DHT failed
                        second_res->done = true;
                        if (done_cb and first_res->done)
                            on_done(false);
                    }
                    else {
                        second_res->done = true;
                        if (first_res->done)
                            on_done(true);
                    }
                }, pht_filter);
    } else {
        // Range exhausted: report with whatever has been collected.
        on_done(true);
    }
}
+
void Pht::lookup(Key k, Pht::LookupCallback cb, DoneCallbackSimple done_cb, bool exact_match) {
    // Public lookup entry point: linearize the key into a prefix, then run
    // the leaf search over the full prefix-length range, seeded by the cache.
    // With exact_match == false, the closest entries (longest common prefix)
    // are returned instead of exact matches.
    auto prefix = linearize(k);
    auto values = std::make_shared<std::vector<std::shared_ptr<IndexEntry>>>();

    auto lo = std::make_shared<int>(0);
    auto hi = std::make_shared<int>(prefix.size_);
    std::shared_ptr<unsigned> max_common_prefix_len = not exact_match ? std::make_shared<unsigned>(0) : nullptr;

    lookupStep(prefix, lo, hi, values,
        [=](std::vector<std::shared_ptr<IndexEntry>>& entries, const Prefix& p) {
            // Unwrap the index entries into plain values for the user callback.
            std::vector<std::shared_ptr<Value>> vals(entries.size());

            std::transform(entries.begin(), entries.end(), vals.begin(),
                [](const std::shared_ptr<IndexEntry>& ie) {
                    return std::make_shared<Value>(ie->value);
                });

            cb(vals, p);
        }, done_cb, max_common_prefix_len, cache_.lookup(prefix));
}
+
void Pht::updateCanary(Prefix p) {
    // Refresh the canary value marking `p` as a live PHT trie node, with a
    // randomized walk toward the root: each ancestor is refreshed with
    // probability 1/2, amortizing maintenance across inserts.
    // TODO: change this... copy value
    dht::Value canary_value;
    canary_value.user_type = canary_;

    dht_->put(p.hash(), std::move(canary_value),
        [=](bool){
            static std::bernoulli_distribution d(0.5);
            crypto::random_device rd;
            if (p.size_ and d(rd))
                updateCanary(p.getPrefix(-1));
        }
    );

    // Non-root nodes also refresh their sibling, keeping both children of a
    // parent marked.
    if (p.size_) {
        dht::Value canary_second_value;
        canary_second_value.user_type = canary_;
        dht_->put(p.getSibling().hash(), std::move(canary_second_value));
    }
}
+
void Pht::insert(const Prefix& kp, IndexEntry entry, std::shared_ptr<int> lo, std::shared_ptr<int> hi, time_point time_p,
                 bool check_split, DoneCallbackSimple done_cb) {
    // Insert `entry` under the key prefix `kp`: locate the current leaf for
    // `kp`, then store the entry there — possibly descending further
    // (getRealPrefix) or splitting the leaf when it is full.

    // Give up on entries whose original creation time has already expired.
    if (time_p + ValueType::USER_DATA.expiration < clock::now()) return;

    auto vals = std::make_shared<std::vector<std::shared_ptr<IndexEntry>>>();
    auto final_prefix = std::make_shared<Prefix>();

    lookupStep(kp, lo, hi, vals,
        [=](std::vector<std::shared_ptr<IndexEntry>>&, Prefix p) {
            // Remember the leaf prefix the search settled on.
            *final_prefix = Prefix(p);
        },
        [=](bool ok){
            if (not ok) {
                if (done_cb)
                    done_cb(false);
            } else {

                // Final store: refresh the canary, schedule the periodic
                // re-insert check, cache the leaf and put the entry.
                RealInsertCallback real_insert = [=](const Prefix& p, IndexEntry entry) {
                    updateCanary(p);
                    checkPhtUpdate(p, entry, time_p);
                    cache_.insert(p);
                    dht_->put(p.hash(), std::move(entry), done_cb , time_p);
                };

                if ( not check_split or final_prefix->size_ == kp.size_ ) {
                    real_insert(*final_prefix, std::move(entry));
                } else {
                    if ( vals->size() < MAX_NODE_ENTRY_COUNT ) {
                        // Leaf has room: may still need to move up one level.
                        getRealPrefix(final_prefix, std::move(entry), real_insert);
                    }
                    else {
                        // Leaf is full: split it before inserting.
                        split(*final_prefix, *vals, entry, real_insert);
                    }
                }
            }
        }, nullptr, cache_.lookup(kp), true);
}
+
/**
 * Interleave the bits of several equal-sized prefixes (one per key field)
 * into a single prefix — a Morton/z-order curve — so multi-dimensional keys
 * map onto the one-dimensional PHT keyspace.
 * With a single prefix, returns it unchanged.
 */
Prefix Pht::zcurve(const std::vector<Prefix>& all_prefix) const {
    Prefix p;

    if ( all_prefix.size() == 1 )
        return all_prefix[0];

    /* All prefixes have the same size (thanks to padding) */
    size_t prefix_size = all_prefix[0].content_.size();

    /* Loop on all uint8_t of the input prefix */
    for ( size_t j = 0, bit = 0; j < prefix_size; j++) {

        uint8_t mask = 0x80;
        /* For each of the 8 bits of the input uint8_t */
        for ( int i = 0; i < 8; ) {

            uint8_t flags = 0;
            uint8_t content = 0;

            /* For each bit of the output uint8_t */
            for ( int k = 0 ; k < 8; k++ ) {

                auto diff = k - i;

                /* get the content 'c', and the flag 'f' of the input prefix */
                auto c = all_prefix[bit].content_[j] & mask;
                auto f = all_prefix[bit].flags_[j] & mask;

                /* Move this bit to the right position according to the diff
                   and merge it into content and flags in the same way */
                content |= ( diff >= 0 ) ? c >> diff : c << std::abs(diff);
                flags |= ( diff >= 0 ) ? f >> diff : f << std::abs(diff);

                /* If we are on the last prefix of the vector get back to the first
                   and move the mask in order to get the n + 1nth bit */
                if ( ++bit == all_prefix.size() ) { bit = 0; ++i; mask >>= 1; }
            }

            /* Add the next flags + content to the output prefix */
            p.content_.push_back(content);
            p.flags_.push_back(flags);
            p.size_ += 8;
        }
    }

    return p;
}
+
+/**
+ * Turn a multi-field Key into a single Prefix: every field is converted to
+ * a Prefix, padded to a common length derived from the key spec, then all
+ * padded prefixes are merged with zcurve().
+ *
+ * @throws std::invalid_argument (INVALID_KEY) when the key does not match
+ *         the index key spec.
+ */
+Prefix Pht::linearize(Key k) const {
+ if (not validKey(k)) { throw std::invalid_argument(INVALID_KEY); }
+
+ /* Pad every field to the longest size declared in the key spec, plus one. */
+ auto padded_size = 1 + std::max_element(keySpec_.begin(), keySpec_.end(),
+ [](const std::pair<std::string, size_t>& a, const std::pair<std::string, size_t>& b) {
+ return a.second < b.second;
+ })->second;
+
+ std::vector<Prefix> prefixes;
+ prefixes.reserve(k.size());
+
+ for ( const auto& field : k ) {
+ Prefix prefix = Blob {field.second.begin(), field.second.end()};
+ prefix.addPaddingContent(padded_size);
+ prefix.updateFlags();
+
+ prefixes.emplace_back(std::move(prefix));
+ }
+
+ return zcurve(prefixes);
+}
+
+/**
+ * Decide whether a new entry should go on prefix p or on its parent.
+ * Counts the entries stored on three nodes (parent, p, p's sibling); if the
+ * total is below MAX_NODE_ENTRY_COUNT the entry is inserted on the parent,
+ * otherwise on p itself. The result is delivered through end_cb.
+ */
+void Pht::getRealPrefix(const std::shared_ptr<Prefix>& p, IndexEntry entry, RealInsertCallback end_cb )
+{
+ /* Root prefix: nothing above it, insert directly. */
+ if ( p->size_ == 0 ) {
+ end_cb(*p, std::move(entry));
+ return;
+ }
+
+ /* Shared state across the three concurrent get() operations below. */
+ struct OpState {
+ unsigned entry_count {0}; /* Total number of data on 3 nodes */
+ unsigned ended {0}; /* How many ops have ended */
+ Prefix parent;
+ OpState(Prefix p) : parent(p) {}
+ };
+ auto op_state = std::make_shared<OpState>(p->getPrefix(-1));
+
+ /* Only consider values whose user_type starts with this index's name. */
+ auto pht_filter = [&](const dht::Value& v) {
+ return v.user_type.compare(0, name_.size(), name_) == 0;
+ };
+
+ /* Lambda will count total number of data node */
+ auto count = [=]( const std::shared_ptr<dht::Value>& value ) {
+ if (value->user_type != canary_)
+ op_state->entry_count++;
+ return true;
+ };
+
+ /* NOTE(review): 'entry' is captured by copy; the std::move below acts on
+ the const lambda member, so it copies rather than moves — harmless but
+ worth knowing. */
+ auto on_done = [=] ( bool ) {
+ op_state->ended++;
+ /* Only the last one do the CallBack*/
+ if (op_state->ended == 3) {
+ if (op_state->entry_count < MAX_NODE_ENTRY_COUNT)
+ end_cb(op_state->parent, std::move(entry));
+ else
+ end_cb(*p, std::move(entry));
+ }
+ };
+
+ /* Query the parent, the prefix itself and its sibling in parallel. */
+ dht_->get(op_state->parent.hash(),
+ count,
+ on_done,
+ pht_filter
+ );
+
+ dht_->get(p->hash(),
+ count,
+ on_done,
+ pht_filter
+ );
+
+ dht_->get(p->getSibling().hash(),
+ count,
+ on_done,
+ pht_filter
+ );
+}
+
+/**
+ * Watch the next-longer prefix of an inserted entry: if a canary appears
+ * there (i.e. the trie grew below our insertion point), re-insert the entry
+ * deeper. The listen is cancelled once the re-insert has been triggered.
+ */
+void Pht::checkPhtUpdate(Prefix p, IndexEntry entry, time_point time_p) {
+
+ Prefix full = entry.prefix;
+ /* Nothing to watch when the entry's full prefix is not longer than p. */
+ if ( p.content_.size() * 8 >= full.content_.size() * 8 ) return;
+
+ auto next_prefix = full.getPrefix( p.size_ + 1 );
+
+ dht_->listen(next_prefix.hash(),
+ [=](const std::shared_ptr<dht::Value> &value) {
+ if (value->user_type == canary_) {
+ insert(full, entry, std::make_shared<int>(0), std::make_shared<int>(full.size_), time_p, false, nullptr);
+
+ /* Cancel listen since we found where we need to update*/
+ return false;
+ }
+
+ /* Keep listening until a canary shows up. */
+ return true;
+ },
+ [=](const dht::Value& v) {
+ /* Filter value v thats start with the same name as ours */
+ return v.user_type.compare(0, name_.size(), name_) == 0;
+ }
+ );
+}
+
+/**
+ * Split an overloaded trie node: find the depth where the existing values
+ * and the new entry diverge, drop canaries on every prefix between that
+ * depth and the insertion point, then hand the target prefix to end_cb.
+ * NOTE(review): the loop assumes loc >= insert.size_ - 1 on entry — TODO
+ * confirm findSplitLocation guarantees this, otherwise loc-- would wrap.
+ */
+void Pht::split(const Prefix& insert, const std::vector<std::shared_ptr<IndexEntry>>& vals, IndexEntry entry, RealInsertCallback end_cb ) {
+ const auto full = Prefix(entry.prefix);
+
+ auto loc = findSplitLocation(full, vals);
+ const auto prefix_to_insert = full.getPrefix(loc);
+
+ /* Mark every intermediate prefix as existing with a canary. */
+ for(;loc != insert.size_ - 1; loc--) {
+ updateCanary(full.getPrefix(loc));
+ }
+
+ end_cb(prefix_to_insert, entry);
+}
+
+} /* indexation */
+
+} /* dht */
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "infohash.h"
+
+#include <functional>
+#include <sstream>
+#include <cstdio>
+
+namespace dht {
+
+/* Definition of the global HexMap instance declared in "infohash.h" (default-initialized). */
+const HexMap hex_map = {};
+
+/**
+ * Deserialize a NodeExport from a msgpack map of the form
+ * {"id": <node id>, "addr": <raw sockaddr bytes>}.
+ * The two keys must appear in that exact order; the address blob must fit
+ * in a sockaddr_storage. Throws msgpack::type_error on any mismatch.
+ */
+void
+NodeExport::msgpack_unpack(msgpack::object o)
+{
+ if (o.type != msgpack::type::MAP)
+ throw msgpack::type_error();
+ if (o.via.map.size < 2)
+ throw msgpack::type_error();
+ if (o.via.map.ptr[0].key.as<std::string>() != "id")
+ throw msgpack::type_error();
+ if (o.via.map.ptr[1].key.as<std::string>() != "addr")
+ throw msgpack::type_error();
+ const auto& addr = o.via.map.ptr[1].val;
+ if (addr.type != msgpack::type::BIN)
+ throw msgpack::type_error();
+ /* Reject oversized address blobs before copying into ss. */
+ if (addr.via.bin.size > sizeof(sockaddr_storage))
+ throw msgpack::type_error();
+ id.msgpack_unpack(o.via.map.ptr[0].val);
+ sslen = addr.via.bin.size;
+ std::copy_n(addr.via.bin.ptr, addr.via.bin.size, (char*)&ss);
+}
+
+/* Serialize a NodeExport to a stream as msgpack data. */
+std::ostream& operator<< (std::ostream& s, const NodeExport& h)
+{
+ msgpack::pack(s, h);
+ return s;
+}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "value.h"
+#include "utils.h"
+#include "callbacks.h"
+
+namespace dht {
+
+/**
+ * Foreign nodes asking for updates about an InfoHash.
+ */
+struct Listener {
+ time_point time; // time of the last (re-)registration
+ Query query; // what values the foreign node asked for
+
+ Listener(time_point t, Query&& q) : time(t), query(std::move(q)) {}
+
+ /* Re-arm the listener: update its timestamp and replace its query. */
+ void refresh(time_point t, Query&& q) {
+ time = t;
+ query = std::move(q);
+ }
+};
+
+/**
+ * A single "listen" operation data
+ */
+struct LocalListener {
+ Sp<Query> query; // query describing the requested values
+ Value::Filter filter; // local filter applied to incoming values
+ ValueCallback get_cb; // invoked with values matching query+filter
+};
+
+
+/* Listener state attached to an ongoing search. */
+struct SearchListener {
+ Sp<Query> query; // query describing the requested values
+ Value::Filter filter; // local filter applied to incoming values
+ ValueCallback get_cb; // invoked with values matching query+filter
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "log.h"
+
+#ifndef _WIN32
+#include <syslog.h>
+#endif
+
+#include <fstream>
+#include <chrono>
+
+namespace dht {
+namespace log {
+
+/**
+ * Print va_list to std::ostream (used for logging).
+ */
+/**
+ * Format a printf-style message with its va_list into s, prefixed with a
+ * [seconds.microseconds] timestamp (steady clock, since epoch). Messages
+ * longer than the 8 KiB buffer are truncated and marked "[[TRUNCATED]]".
+ */
+void
+printLog(std::ostream& s, char const *m, va_list args) {
+ // print log to buffer
+ std::array<char, 8192> buffer;
+ int ret = vsnprintf(buffer.data(), buffer.size(), m, args);
+ if (ret < 0)
+ return;
+
+ // write timestamp
+ using namespace std::chrono;
+ using log_precision = microseconds;
+ constexpr auto den = log_precision::period::den;
+ auto num = duration_cast<log_precision>(steady_clock::now().time_since_epoch()).count();
+ s << "[" << std::setfill('0') << std::setw(6) << num / den << "."
+ << std::setfill('0') << std::setw(6) << num % den << "]" << " ";
+
+ // write log. On truncation vsnprintf stores at most buffer.size()-1
+ // characters plus a NUL terminator: clamp to size()-1 so the NUL byte
+ // is never written to the stream (the previous bound of size() was).
+ s.write(buffer.data(), std::min((size_t) ret, buffer.size() - 1));
+ if ((size_t) ret >= buffer.size())
+ s << "[[TRUNCATED]]";
+ s << std::endl;
+}
+
+/**
+ * Route the DhtRunner's three log levels to the console:
+ * errors to stderr in red, warnings to stdout in yellow,
+ * debug/info to stdout uncolored.
+ */
+void
+enableLogging(dht::DhtRunner &dht) {
+ dht.setLoggers(
+ [](char const *m, va_list args) {
+ std::cerr << red;
+ printLog(std::cerr, m, args);
+ std::cerr << def;
+ },
+ [](char const *m, va_list args) {
+ std::cout << yellow;
+ printLog(std::cout, m, args);
+ std::cout << def;
+ },
+ [](char const *m, va_list args) { printLog(std::cout, m, args); }
+ );
+}
+
+/**
+ * Send all three log levels of the DhtRunner to a file at 'path'
+ * (truncated on open). The stream is shared by the logger lambdas so it
+ * stays alive for as long as any of them is installed.
+ */
+void
+enableFileLogging(dht::DhtRunner &dht, const std::string &path) {
+ auto logfile = std::make_shared<std::fstream>(path, std::ios::out);
+
+ auto writer = [logfile](char const *m, va_list args) { printLog(*logfile, m, args); };
+ dht.setLoggers(writer, writer, writer);
+}
+
+/**
+ * Route the DhtRunner's logs to syslog (no-op on Windows). openlog/closelog
+ * manage process-global state, so a single RAII Syslog instance is shared
+ * through a static weak_ptr: the first caller opens the log, and it is
+ * closed once no logger lambda holds the shared_ptr any more.
+ */
+OPENDHT_PUBLIC void
+enableSyslog(dht::DhtRunner &dht, const char* name) {
+#ifndef _WIN32
+ struct Syslog {
+ Syslog(const char* n) {
+ openlog(n, LOG_NDELAY, LOG_USER);
+ }
+ ~Syslog() {
+ closelog();
+ }
+ };
+ // syslog is global. Existing instance must be reused.
+ static std::weak_ptr<Syslog> opened_logfile;
+ auto logfile = opened_logfile.lock();
+ if (not logfile) {
+ logfile = std::make_shared<Syslog>(name);
+ opened_logfile = logfile;
+ }
+ // Each lambda captures the shared_ptr, keeping syslog open while in use.
+ dht.setLoggers(
+ [logfile](char const *m, va_list args) { vsyslog(LOG_ERR, m, args); },
+ [logfile](char const *m, va_list args) { vsyslog(LOG_WARNING, m, args); },
+ [logfile](char const *m, va_list args) { vsyslog(LOG_INFO, m, args); }
+ );
+#endif
+}
+
+/* Replace all three loggers with the no-op logger. */
+void
+disableLogging(dht::DhtRunner &dht) {
+ dht.setLoggers(dht::NOLOG, dht::NOLOG, dht::NOLOG);
+}
+
+}
+}
--- /dev/null
+/*
+ * Copyright (C) 2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+namespace dht {
+namespace net {
+
+/* Semantic type of a DHT protocol message, as decoded from the wire. */
+enum class MessageType {
+ Error = 0,
+ Reply,
+ Ping,
+ FindNode,
+ GetValues,
+ AnnounceValue,
+ Refresh,
+ Listen,
+ ValueData, // partial (multi-packet) value payload
+ ValueUpdate
+};
+
+} /* namespace net */
+} /* dht */
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+
+#include "network_engine.h"
+#include "request.h"
+#include "default_types.h"
+#include "log_enable.h"
+#include "parsed_message.h"
+
+#include <msgpack.hpp>
+
+#ifndef _WIN32
+#include <arpa/inet.h>
+#include <unistd.h>
+#else
+#include <ws2tcpip.h>
+#include <io.h>
+#endif
+#include <fcntl.h>
+
+#include <cstring>
+
+namespace dht {
+namespace net {
+
+/* Human-readable error strings attached to DhtProtocolException. */
+const std::string DhtProtocolException::GET_NO_INFOHASH {"Get_values with no info_hash"};
+const std::string DhtProtocolException::LISTEN_NO_INFOHASH {"Listen with no info_hash"};
+const std::string DhtProtocolException::LISTEN_WRONG_TOKEN {"Listen with wrong token"};
+const std::string DhtProtocolException::PUT_NO_INFOHASH {"Put with no info_hash"};
+const std::string DhtProtocolException::PUT_WRONG_TOKEN {"Put with wrong token"};
+const std::string DhtProtocolException::PUT_INVALID_ID {"Put with invalid id"};
+const std::string DhtProtocolException::STORAGE_NOT_FOUND {"Access operation for unknown storage"};
+
+/* Out-of-line definitions for constexpr members declared in the header. */
+constexpr std::chrono::seconds NetworkEngine::UDP_REPLY_TIME;
+constexpr std::chrono::seconds NetworkEngine::RX_MAX_PACKET_TIME;
+constexpr std::chrono::seconds NetworkEngine::RX_TIMEOUT;
+
+// client version string sent in the "v" field of every message
+const std::string NetworkEngine::my_v {"RNG1"};
+constexpr size_t NetworkEngine::MAX_REQUESTS_PER_SEC;
+
+/* ::ffff:0:0/96 — prefix of IPv4-mapped IPv6 addresses. */
+static const uint8_t v4prefix[16] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0, 0, 0, 0
+};
+
+// maximum number of nodes packed into a nodes4/nodes6 reply
+constexpr unsigned SEND_NODES {8};
+
+#ifdef _WIN32
+
+/* Winsock variant: toggle non-blocking mode with FIONBIO. Returns true on success. */
+static bool
+set_nonblocking(int fd, int nonblocking)
+{
+ unsigned long mode = !!nonblocking;
+ int rc = ioctlsocket(fd, FIONBIO, &mode);
+ return rc == 0;
+}
+
+extern const char *inet_ntop(int, const void *, char *, socklen_t);
+
+#else
+
+/* POSIX variant: set or clear O_NONBLOCK on fd, preserving the other
+ status flags. Returns true on success. */
+static bool
+set_nonblocking(int fd, int nonblocking)
+{
+ const int flags = fcntl(fd, F_GETFL, 0);
+ if (flags < 0)
+ return false;
+ const int updated = nonblocking ? (flags | O_NONBLOCK) : (flags & ~O_NONBLOCK);
+ return fcntl(fd, F_SETFL, updated) >= 0;
+}
+
+#endif
+
+
+/* Transaction-ids are 4-bytes long, with the first two bytes identifying
+ * the kind of request, and the remaining two a sequence number in
+ * host order.
+ */
+struct TransId final : public std::array<uint8_t, 4> {
+ TransId() { std::fill(begin(), end(), 0); }
+ TransId(const std::array<char, 4>& o) { std::copy(o.begin(), o.end(), begin()); }
+ /* Store the id in network byte order. memcpy is used instead of a
+ reinterpret_cast to uint32_t*: the byte array has no uint32_t
+ alignment guarantee and the cast-write violates strict aliasing. */
+ TransId(uint32_t id) {
+ const uint32_t nid = htonl(id);
+ std::memcpy(data(), &nid, sizeof(nid));
+ }
+
+ /* Read the 4 bytes back as a host-order integer (same memcpy rationale). */
+ uint32_t toInt() const {
+ uint32_t nid;
+ std::memcpy(&nid, data(), sizeof(nid));
+ return ntohl(nid);
+ }
+};
+
+
+/* In-flight reassembly state for a multi-packet value transfer. */
+struct NetworkEngine::PartialMessage {
+ SockAddr from; // sender; further parts must come from the same address
+ time_point start; // when the first part arrived
+ time_point last_part; // when the most recent part arrived
+ std::unique_ptr<ParsedMessage> msg; // message being assembled
+};
+
+/* Pack each value into its msgpack wire form, preserving order. */
+std::vector<Blob>
+serializeValues(const std::vector<Sp<Value>>& st)
+{
+ std::vector<Blob> serialized;
+ serialized.reserve(st.size());
+ for (const auto& value : st)
+ serialized.emplace_back(packMsg(value));
+ return serialized;
+}
+
+/* Pack a token blob as msgpack BIN data. */
+void
+packToken(msgpack::packer<msgpack::sbuffer>& pk, const Blob& token)
+{
+ pk.pack_bin(token.size());
+ pk.pack_bin_body((char*)token.data(), token.size());
+}
+
+/* Build an answer by stealing the relevant fields from a parsed message. */
+RequestAnswer::RequestAnswer(ParsedMessage&& msg)
+ : ntoken(std::move(msg.token)),
+ values(std::move(msg.values)),
+ refreshed_values(std::move(msg.refreshed_values)),
+ expired_values(std::move(msg.expired_values)),
+ fields(std::move(msg.fields)),
+ nodes4(std::move(msg.nodes4)),
+ nodes6(std::move(msg.nodes6))
+{}
+
+/* Minimal constructor: zero node id, no protocol callbacks installed. */
+NetworkEngine::NetworkEngine(Logger& log, Scheduler& scheduler, const int& s, const int& s6)
+ : myid(zeroes), dht_socket(s), dht_socket6(s6), DHT_LOG(log), scheduler(scheduler)
+{}
+/**
+ * Full constructor: installs the protocol callbacks and puts both sockets
+ * (IPv4 s, IPv6 s6; -1 when unused) into non-blocking mode.
+ * Throws DhtException if a valid socket can't be made non-blocking.
+ */
+NetworkEngine::NetworkEngine(InfoHash& myid, NetId net, const int& s, const int& s6, Logger& log, Scheduler& scheduler,
+ decltype(NetworkEngine::onError) onError,
+ decltype(NetworkEngine::onNewNode) onNewNode,
+ decltype(NetworkEngine::onReportedAddr) onReportedAddr,
+ decltype(NetworkEngine::onPing) onPing,
+ decltype(NetworkEngine::onFindNode) onFindNode,
+ decltype(NetworkEngine::onGetValues) onGetValues,
+ decltype(NetworkEngine::onListen) onListen,
+ decltype(NetworkEngine::onAnnounce) onAnnounce,
+ decltype(NetworkEngine::onRefresh) onRefresh) :
+ onError(onError), onNewNode(onNewNode), onReportedAddr(onReportedAddr), onPing(onPing), onFindNode(onFindNode),
+ onGetValues(onGetValues), onListen(onListen), onAnnounce(onAnnounce), onRefresh(onRefresh), myid(myid),
+ network(net), dht_socket(s), dht_socket6(s6), DHT_LOG(log), scheduler(scheduler)
+{
+ if (dht_socket >= 0) {
+ if (!set_nonblocking(dht_socket, 1))
+ throw DhtException("Can't set socket to non-blocking mode");
+ }
+ if (dht_socket6 >= 0) {
+ if (!set_nonblocking(dht_socket6, 1))
+ throw DhtException("Can't set socket to non-blocking mode");
+ }
+}
+
+/* Cancel all pending requests and mark their nodes expired on teardown. */
+NetworkEngine::~NetworkEngine() {
+ clear();
+}
+
+/**
+ * Push nodes and values to a listening node over its listen socket.
+ * Oversized payloads surface as std::overflow_error from sendNodesValues
+ * and are logged rather than propagated.
+ */
+void
+NetworkEngine::tellListener(Sp<Node> node, Tid socket_id, const InfoHash& hash, want_t want,
+ const Blob& ntoken, std::vector<Sp<Node>>&& nodes,
+ std::vector<Sp<Node>>&& nodes6, std::vector<Sp<Value>>&& values,
+ const Query& query)
+{
+ auto nnodes = bufferNodes(node->getFamily(), hash, want, nodes, nodes6);
+ try {
+ sendNodesValues(node->getAddr(), socket_id, nnodes.first, nnodes.second, values, query, ntoken);
+ } catch (const std::overflow_error& e) {
+ DHT_LOG.e("Can't send value: buffer not large enough !");
+ }
+}
+
+/**
+ * Notify a listening node that some value ids were refreshed: builds a
+ * reply ("y":"r") whose "u" map carries our id, the token and the ids under
+ * key "re", then sends it on the node's listen socket.
+ */
+void
+NetworkEngine::tellListenerRefreshed(Sp<Node> n, Tid socket_id, const InfoHash&, const Blob& token, const std::vector<Value::Id>& values)
+{
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ // top-level keys: u, t, y, v (+ n when a network id is set)
+ pk.pack_map(4+(network?1:0));
+
+ pk.pack(std::string("u"));
+ pk.pack_map(1 + (not values.empty()?1:0) + (not token.empty()?1:0));
+ pk.pack(std::string("id")); pk.pack(myid);
+ if (not token.empty()) {
+ pk.pack(std::string("token")); packToken(pk, token);
+ }
+ if (not values.empty()) {
+ pk.pack(std::string("re"));
+ pk.pack(values);
+ DHT_LOG.d(n->id, "[node %s] sending %zu refreshed values", n->toString().c_str(), values.size());
+ }
+
+ pk.pack(std::string("t")); pk.pack(socket_id);
+ pk.pack(std::string("y")); pk.pack(std::string("r"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ // send response
+ send(buffer.data(), buffer.size(), 0, n->getAddr());
+}
+
+/**
+ * Notify a listening node that some value ids expired. Identical wire
+ * layout to tellListenerRefreshed, except the ids are sent under key "exp".
+ */
+void
+NetworkEngine::tellListenerExpired(Sp<Node> n, Tid socket_id, const InfoHash&, const Blob& token, const std::vector<Value::Id>& values)
+{
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ // top-level keys: u, t, y, v (+ n when a network id is set)
+ pk.pack_map(4+(network?1:0));
+
+ pk.pack(std::string("u"));
+ pk.pack_map(1 + (not values.empty()?1:0) + (not token.empty()?1:0));
+ pk.pack(std::string("id")); pk.pack(myid);
+ if (not token.empty()) {
+ pk.pack(std::string("token")); packToken(pk, token);
+ }
+ if (not values.empty()) {
+ pk.pack(std::string("exp"));
+ pk.pack(values);
+ DHT_LOG.d(n->id, "[node %s] sending %zu expired values", n->toString().c_str(), values.size());
+ }
+
+ pk.pack(std::string("t")); pk.pack(socket_id);
+ pk.pack(std::string("y")); pk.pack(std::string("r"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ // send response
+ send(buffer.data(), buffer.size(), 0, n->getAddr());
+}
+
+
+/* Whether a usable socket exists for the given address family.
+ af == 0 asks whether either family is up. */
+bool
+NetworkEngine::isRunning(sa_family_t af) const
+{
+ if (af == 0)
+ return dht_socket >= 0 || dht_socket6 >= 0;
+ if (af == AF_INET)
+ return dht_socket >= 0;
+ if (af == AF_INET6)
+ return dht_socket6 >= 0;
+ return false;
+}
+
+/* Cancel every pending request and mark its target node as expired. */
+void
+NetworkEngine::clear()
+{
+ for (auto& request : requests) {
+ request.second->cancel();
+ request.second->node->setExpired();
+ }
+ requests.clear();
+}
+
+/* On a connectivity change, forget bad nodes of that family so they get retried. */
+void
+NetworkEngine::connectivityChanged(sa_family_t af)
+{
+ cache.clearBadNodes(af);
+}
+
+/**
+ * Perform one (re)transmission attempt of a pending request, then schedule
+ * the next attempt. Expired requests mark their node expired; requests
+ * toward nodes with an unknown id are also dropped from the tid map.
+ */
+void
+NetworkEngine::requestStep(Sp<Request> sreq)
+{
+ auto& req = *sreq;
+ if (not req.pending())
+ return;
+
+ auto now = scheduler.time();
+ auto& node = *req.node;
+ if (req.isExpired(now)) {
+ DHT_LOG.d(node.id, "[node %s] expired !", node.toString().c_str());
+ node.setExpired();
+ if (not node.id)
+ requests.erase(req.tid);
+ return;
+ } else if (req.attempt_count == 1) {
+ // after the first failed attempt, report a provisional expiry
+ req.on_expired(req, false);
+ }
+
+ // MSG_CONFIRM only when the node replied recently (keeps ARP fresh)
+ auto err = send((char*)req.msg.data(), req.msg.size(),
+ (node.getReplyTime() >= now - UDP_REPLY_TIME) ? 0 : MSG_CONFIRM,
+ node.getAddr());
+ if (err == ENETUNREACH ||
+ err == EHOSTUNREACH ||
+ err == EAFNOSUPPORT ||
+ err == EPIPE ||
+ err == EPERM)
+ {
+ // hard failures: give up on this node immediately
+ node.setExpired();
+ if (not node.id)
+ requests.erase(req.tid);
+ } else {
+ if (err != EAGAIN) {
+ ++req.attempt_count;
+ }
+ req.last_try = now;
+ // retry later; weak_ptr avoids keeping a cancelled request alive
+ std::weak_ptr<Request> wreq = sreq;
+ scheduler.add(req.last_try + Node::MAX_RESPONSE_TIME, [this,wreq] {
+ if (auto req = wreq.lock())
+ requestStep(req);
+ });
+ }
+}
+
+/**
+ * Register and start sending a request to a node. Retransmissions continue
+ * via requestStep() until the request completes or expires
+ * (Request::MAX_ATTEMPT_COUNT attempts).
+ * Requests toward nodes with an unknown id are tracked in the tid map so
+ * the eventual reply can be matched.
+ */
+void
+NetworkEngine::sendRequest(const Sp<Request>& request)
+{
+ auto& node = request->node;
+ if (not node->id)
+ requests.emplace(request->tid, request);
+ request->start = scheduler.time();
+ node->requested(request);
+ requestStep(request);
+}
+
+
+/* Rate control for requests we receive: a per-IP limiter first, then a
+ global one. Returns true when the request is allowed. */
+bool
+NetworkEngine::rateLimit(const SockAddr& addr)
+{
+ const auto& now = scheduler.time();
+
+ // occasional IP limiter maintenance (a few times every second at max rate)
+ if (limiter_maintenance++ == MAX_REQUESTS_PER_SEC/8) {
+ for (auto it = address_rate_limiter.begin(); it != address_rate_limiter.end();) {
+ // drop limiters with no recent activity (post-increment keeps
+ // the iterator valid across erase)
+ if (it->second.maintain(now) == 0)
+ address_rate_limiter.erase(it++);
+ else
+ ++it;
+ }
+ limiter_maintenance = 0;
+ }
+
+ auto it = address_rate_limiter.emplace(addr, IpLimiter{});
+ // invoke per IP, then global rate limiter
+ return it.first->second.limit(now) and rate_limiter.limit(now);
+}
+
+/**
+ * Reject addresses that can't belong to a real peer: port 0; IPv4 with a
+ * zero first octet or in the high 224.0.0.0/3 block (multicast/reserved);
+ * IPv6 multicast (ff00::/8), link-local (fe80::/10), the unspecified
+ * address, or IPv4-mapped (::ffff:0:0/96); any other family.
+ */
+bool
+NetworkEngine::isMartian(const SockAddr& addr)
+{
+ if (addr.getPort() == 0)
+ return true;
+ switch(addr.getFamily()) {
+ case AF_INET: {
+ const auto& sin = addr.getIPv4();
+ const uint8_t* address = (const uint8_t*)&sin.sin_addr;
+ return (address[0] == 0) ||
+ ((address[0] & 0xE0) == 0xE0);
+ }
+ case AF_INET6: {
+ // truncated sockaddr: can't even read the full address
+ if (addr.getLength() < sizeof(sockaddr_in6))
+ return true;
+ const auto& sin6 = addr.getIPv6();
+ const uint8_t* address = (const uint8_t*)&sin6.sin6_addr;
+ return address[0] == 0xFF ||
+ (address[0] == 0xFE && (address[1] & 0xC0) == 0x80) ||
+ memcmp(address, zeroes.data(), 16) == 0 ||
+ memcmp(address, v4prefix, 12) == 0;
+ }
+ default:
+ return true;
+ }
+}
+
+/* The internal blacklist is an LRU cache of nodes that have sent
+ incorrect messages. Blacklisting also marks the node as expired. */
+void
+NetworkEngine::blacklistNode(const Sp<Node>& n)
+{
+ n->setExpired();
+ blacklist.emplace(n->getAddr());
+}
+
+/* Membership test against the blacklist filled by blacklistNode(). */
+bool
+NetworkEngine::isNodeBlacklisted(const SockAddr& addr) const
+{
+ return blacklist.count(addr) > 0;
+}
+
+/**
+ * Entry point for a raw incoming datagram: filter out martian/blacklisted
+ * senders, unpack the msgpack payload, reject foreign networks and our own
+ * messages, rate-limit requests, handle partial (multi-packet) value data,
+ * and finally dispatch complete messages to process().
+ */
+void
+NetworkEngine::processMessage(const uint8_t *buf, size_t buflen, const SockAddr& from)
+{
+ if (isMartian(from)) {
+ DHT_LOG.w("Received packet from martian node %s", from.toString().c_str());
+ return;
+ }
+
+ if (isNodeBlacklisted(from)) {
+ DHT_LOG.w("Received packet from blacklisted node %s", from.toString().c_str());
+ return;
+ }
+
+ std::unique_ptr<ParsedMessage> msg {new ParsedMessage};
+ try {
+ msgpack::unpacked msg_res = msgpack::unpack((const char*)buf, buflen);
+ msg->msgpack_unpack(msg_res.get());
+ } catch (const std::exception& e) {
+ DHT_LOG.w("Can't parse message of size %lu: %s", buflen, e.what());
+ //DHT_LOG.DBG.logPrintable(buf, buflen);
+ return;
+ }
+
+ if (msg->network != network) {
+ DHT_LOG.d("Received message from other network %u", msg->network);
+ return;
+ }
+
+ const auto& now = scheduler.time();
+
+ // partial value data
+ if (msg->type == MessageType::ValueData) {
+ auto pmsg_it = partial_messages.find(msg->tid);
+ if (pmsg_it == partial_messages.end()) {
+ if (logIncoming_)
+ DHT_LOG.d("Can't find partial message");
+ rateLimit(from);
+ return;
+ }
+ // further parts must come from the address that started the transfer
+ if (!pmsg_it->second.from.equals(from)) {
+ DHT_LOG.d("Received partial message data from unexpected IP address");
+ rateLimit(from);
+ return;
+ }
+ // append data block
+ if (pmsg_it->second.msg->append(*msg)) {
+ pmsg_it->second.last_part = now;
+ // check data completion
+ if (pmsg_it->second.msg->complete()) {
+ // process the full message
+ process(std::move(pmsg_it->second.msg), from);
+ partial_messages.erase(pmsg_it);
+ } else
+ scheduler.add(now + RX_TIMEOUT, std::bind(&NetworkEngine::maintainRxBuffer, this, msg->tid));
+ }
+ return;
+ }
+
+ if (msg->id == myid or not msg->id) {
+ DHT_LOG.d("Received message from self");
+ return;
+ }
+
+ if (msg->type > MessageType::Reply) {
+ /* Rate limit requests. */
+ if (!rateLimit(from)) {
+ DHT_LOG.w("Dropping request due to rate limiting");
+ return;
+ }
+ }
+
+ if (msg->value_parts.empty()) {
+ process(std::move(msg), from);
+ } else {
+ // starting partial message session
+ PartialMessage pmsg;
+ pmsg.from = from;
+ pmsg.msg = std::move(msg);
+ pmsg.start = now;
+ pmsg.last_part = now;
+ auto wmsg = partial_messages.emplace(pmsg.msg->tid, std::move(pmsg));
+ if (wmsg.second) {
+ // schedule both the hard deadline and the inactivity timeout
+ scheduler.add(now + RX_MAX_PACKET_TIME, std::bind(&NetworkEngine::maintainRxBuffer, this, wmsg.first->first));
+ scheduler.add(now + RX_TIMEOUT, std::bind(&NetworkEngine::maintainRxBuffer, this, wmsg.first->first));
+ } else
+ DHT_LOG.e("Partial message with given TID already exists");
+ }
+}
+
+/**
+ * Dispatch a fully-parsed message: socket data (ValueUpdate), responses
+ * (Error/Reply, matched to a pending request or an open socket), or
+ * incoming requests (Ping/FindNode/GetValues/AnnounceValue/Refresh/Listen)
+ * routed to the corresponding on* callback.
+ * Throws DhtProtocolException for unknown transaction ids.
+ */
+void
+NetworkEngine::process(std::unique_ptr<ParsedMessage>&& msg, const SockAddr& from)
+{
+ const auto& now = scheduler.time();
+ auto node = cache.getNode(msg->id, from, now, true, msg->is_client);
+
+ if (msg->type == MessageType::ValueUpdate) {
+ // data for an opened socket: deliver through its receive callback
+ auto rsocket = node->getSocket(msg->tid);
+ if (not rsocket)
+ throw DhtProtocolException {DhtProtocolException::UNKNOWN_TID, "Can't find socket", msg->id};
+ node->received(now, {});
+ onNewNode(node, 2);
+ deserializeNodes(*msg, from);
+ rsocket->on_receive(node, std::move(*msg));
+ }
+ else if (msg->type == MessageType::Error or msg->type == MessageType::Reply) {
+ auto rsocket = node->getSocket(msg->tid);
+ auto req = node->getRequest(msg->tid);
+
+ /* either response for a request or data for an opened socket */
+ if (not req and not rsocket) {
+ // maybe a reply to a request sent before we knew the node's id
+ auto req_it = requests.find(msg->tid);
+ if (req_it != requests.end() and not req_it->second->node->id) {
+ req = req_it->second;
+ req->node = node;
+ requests.erase(req_it);
+ } else {
+ node->received(now, req);
+ if (not node->isClient())
+ onNewNode(node, 1);
+ throw DhtProtocolException {DhtProtocolException::UNKNOWN_TID, "Can't find transaction", msg->id};
+ }
+ }
+
+ node->received(now, req);
+
+ if (not node->isClient())
+ onNewNode(node, 2);
+ onReportedAddr(msg->id, msg->addr);
+
+ // stale answer: the request is no longer waiting for it
+ if (req and (req->cancelled() or req->expired() or req->completed())) {
+ DHT_LOG.w(node->id, "[node %s] response to expired, cancelled or completed request", node->toString().c_str());
+ return;
+ }
+
+ switch (msg->type) {
+ case MessageType::Error: {
+ // only NOT_FOUND on refresh and UNAUTHORIZED on announce/listen
+ // are actionable; everything else is just logged
+ if (msg->id and req and (
+ (msg->error_code == DhtProtocolException::NOT_FOUND and req->getType() == MessageType::Refresh) or
+ (msg->error_code == DhtProtocolException::UNAUTHORIZED and (req->getType() == MessageType::AnnounceValue
+ or req->getType() == MessageType::Listen))))
+ {
+ req->last_try = time_point::min();
+ req->reply_time = time_point::min();
+ onError(req, DhtProtocolException {msg->error_code});
+ } else {
+ if (logIncoming_)
+ DHT_LOG.w(msg->id, "[node %s %s] received unknown error message %u",
+ msg->id.toString().c_str(), from.toString().c_str(), msg->error_code);
+ }
+ break;
+ }
+ case MessageType::Reply:
+ if (req) { /* request reply */
+ auto& r = *req;
+ if (r.getType() == MessageType::AnnounceValue or r.getType() == MessageType::Listen)
+ r.node->authSuccess();
+ r.reply_time = scheduler.time();
+
+ deserializeNodes(*msg, from);
+ r.setDone(std::move(*msg));
+ break;
+ } else { /* request socket data */
+ deserializeNodes(*msg, from);
+ rsocket->on_receive(node, std::move(*msg));
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ // incoming request: answer it and notify the upper layer
+ node->received(now, {});
+ if (not node->isClient())
+ onNewNode(node, 1);
+ try {
+ switch (msg->type) {
+ case MessageType::Ping:
+ ++in_stats.ping;
+ if (logIncoming_)
+ DHT_LOG.d(node->id, "[node %s] sending pong", node->toString().c_str());
+ onPing(node);
+ sendPong(from, msg->tid);
+ break;
+ case MessageType::FindNode: {
+ //DHT_LOG.d(msg->target, node->id, "[node %s] got 'find' request for %s (%d)", node->toString().c_str(), msg->target.toString().c_str(), msg->want);
+ ++in_stats.find;
+ RequestAnswer answer = onFindNode(node, msg->target, msg->want);
+ auto nnodes = bufferNodes(from.getFamily(), msg->target, msg->want, answer.nodes4, answer.nodes6);
+ sendNodesValues(from, msg->tid, nnodes.first, nnodes.second, {}, {}, answer.ntoken);
+ break;
+ }
+ case MessageType::GetValues: {
+ //DHT_LOG.d(msg->info_hash, node->id, "[node %s] got 'get' request for %s", node->toString().c_str(), msg->info_hash.toString().c_str());
+ ++in_stats.get;
+ RequestAnswer answer = onGetValues(node, msg->info_hash, msg->want, msg->query);
+ auto nnodes = bufferNodes(from.getFamily(), msg->info_hash, msg->want, answer.nodes4, answer.nodes6);
+ sendNodesValues(from, msg->tid, nnodes.first, nnodes.second, answer.values, msg->query, answer.ntoken);
+ break;
+ }
+ case MessageType::AnnounceValue: {
+ if (logIncoming_)
+ DHT_LOG.d(msg->info_hash, node->id, "[node %s] got 'put' request for %s", node->toString().c_str(), msg->info_hash.toString().c_str());
+ ++in_stats.put;
+ onAnnounce(node, msg->info_hash, msg->token, msg->values, msg->created);
+
+ /* Note that if storageStore failed, we lie to the requestor.
+ This is to prevent them from backtracking, and hence
+ polluting the DHT. */
+ for (auto& v : msg->values) {
+ sendValueAnnounced(from, msg->tid, v->id);
+ }
+ break;
+ }
+ case MessageType::Refresh:
+ if (logIncoming_)
+ DHT_LOG.d(msg->info_hash, node->id, "[node %s] got 'refresh' request for %s", node->toString().c_str(), msg->info_hash.toString().c_str());
+ onRefresh(node, msg->info_hash, msg->token, msg->value_id);
+ /* Same note as above in MessageType::AnnounceValue applies. */
+ sendValueAnnounced(from, msg->tid, msg->value_id);
+ break;
+ case MessageType::Listen: {
+ if (logIncoming_)
+ DHT_LOG.d(msg->info_hash, node->id, "[node %s] got 'listen' request for %s", node->toString().c_str(), msg->info_hash.toString().c_str());
+ ++in_stats.listen;
+ RequestAnswer answer = onListen(node, msg->info_hash, msg->token, msg->socket_id, std::move(msg->query));
+ auto nnodes = bufferNodes(from.getFamily(), msg->info_hash, msg->want, answer.nodes4, answer.nodes6);
+ sendListenConfirmation(from, msg->tid);
+ break;
+ }
+ default:
+ break;
+ }
+ } catch (const std::overflow_error& e) {
+ DHT_LOG.e("Can't send value: buffer not large enough !");
+ } catch (DhtProtocolException& e) {
+ // report protocol errors back to the sender
+ sendError(from, msg->tid, e.getCode(), e.getMsg().c_str(), true);
+ }
+ }
+}
+
+/* Pack a sockaddr's raw address bytes under key "sa" (at most the size of
+ an in_addr / in6_addr, depending on the family). */
+void
+insertAddr(msgpack::packer<msgpack::sbuffer>& pk, const SockAddr& addr)
+{
+ const bool is_v4 = addr.getFamily() == AF_INET;
+ const size_t max_len = is_v4 ? sizeof(in_addr) : sizeof(in6_addr);
+ const size_t addr_len = std::min<size_t>(addr.getLength(), max_len);
+ const void* addr_ptr = is_v4 ? (const void*)&addr.getIPv4().sin_addr
+ : (const void*)&addr.getIPv6().sin6_addr;
+ pk.pack("sa");
+ pk.pack_bin(addr_len);
+ pk.pack_bin_body((const char*)addr_ptr, addr_len);
+}
+
+/**
+ * Send a datagram on the socket matching the destination family.
+ * Returns 0 on success or an errno value on failure; throws
+ * SocketException on EPIPE so callers can tear down the socket.
+ */
+int
+NetworkEngine::send(const char *buf, size_t len, int flags, const SockAddr& addr)
+{
+ if (not addr)
+ return EFAULT;
+
+ int s;
+ if (addr.getFamily() == AF_INET)
+ s = dht_socket;
+ else if (addr.getFamily() == AF_INET6)
+ s = dht_socket6;
+ else
+ s = -1;
+
+ if (s < 0)
+ return EAFNOSUPPORT;
+#ifdef MSG_NOSIGNAL
+ // avoid SIGPIPE where supported; the EPIPE branch below handles the rest
+ flags |= MSG_NOSIGNAL;
+#endif
+ if (sendto(s, buf, len, flags, addr.get(), addr.getLength()) == -1) {
+ int err = errno;
+ DHT_LOG.e("Can't send message to %s: %s", addr.toString().c_str(), strerror(err));
+ if (err == EPIPE) {
+ throw SocketException(EPIPE);
+ }
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * Send a "ping" query to a node and return the pending request.
+ * on_done fires when the pong arrives, on_expired when the request times
+ * out; both are optional.
+ */
+Sp<Request>
+NetworkEngine::sendPing(Sp<Node> node, RequestCb&& on_done, RequestExpiredCb&& on_expired) {
+ TransId tid (node->getNewTid());
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ // top-level keys: a, q, t, y, v (+ n when a network id is set)
+ pk.pack_map(5+(network?1:0));
+
+ pk.pack(std::string("a")); pk.pack_map(1);
+ pk.pack(std::string("id")); pk.pack(myid);
+
+ pk.pack(std::string("q")); pk.pack(std::string("ping"));
+ pk.pack(std::string("t")); pk.pack_bin(tid.size());
+ pk.pack_bin_body((const char*)tid.data(), tid.size());
+ pk.pack(std::string("y")); pk.pack(std::string("q"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ auto req = std::make_shared<Request>(MessageType::Ping, tid.toInt(), node,
+ Blob(buffer.data(), buffer.data() + buffer.size()),
+ [=](const Request& req_status, ParsedMessage&&) {
+ DHT_LOG.d(req_status.node->id, "[node %s] got pong !", req_status.node->toString().c_str());
+ if (on_done) {
+ on_done(req_status, {});
+ }
+ },
+ [=](const Request& req_status, bool done) { /* on expired */
+ if (on_expired) {
+ on_expired(req_status, done);
+ }
+ }
+ );
+ sendRequest(req);
+ ++out_stats.ping;
+ return req;
+}
+
+void
+NetworkEngine::sendPong(const SockAddr& addr, Tid tid) {
+ // Replies to a ping: an "r" (response) message echoing the requester's
+ // transaction id and telling the peer its observed address ("sa").
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(4+(network?1:0));
+
+ pk.pack(std::string("r")); pk.pack_map(2);
+ pk.pack(std::string("id")); pk.pack(myid);
+ insertAddr(pk, addr);
+
+ TransId t (tid);
+ pk.pack(std::string("t")); pk.pack_bin(t.size());
+ pk.pack_bin_body((const char*)t.data(), t.size());
+ pk.pack(std::string("y")); pk.pack(std::string("r"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ send(buffer.data(), buffer.size(), 0, addr);
+}
+
+Sp<Request>
+NetworkEngine::sendFindNode(Sp<Node> n, const InfoHash& target, want_t want,
+ RequestCb&& on_done, RequestExpiredCb&& on_expired) {
+ // Sends a "find" request for nodes close to `target`. `want` selects
+ // whether IPv4 ("w":[AF_INET]) and/or IPv6 node lists are requested;
+ // want <= 0 omits the "w" entry entirely.
+ TransId tid (n->getNewTid());
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(5+(network?1:0));
+
+ // "a" map size must match exactly what is packed below.
+ pk.pack(std::string("a")); pk.pack_map(2 + (want>0?1:0));
+ pk.pack(std::string("id")); pk.pack(myid);
+ pk.pack(std::string("target")); pk.pack(target);
+ if (want > 0) {
+ pk.pack(std::string("w"));
+ pk.pack_array(((want & WANT4)?1:0) + ((want & WANT6)?1:0));
+ if (want & WANT4) pk.pack(AF_INET);
+ if (want & WANT6) pk.pack(AF_INET6);
+ }
+
+ pk.pack(std::string("q")); pk.pack(std::string("find"));
+ pk.pack(std::string("t")); pk.pack_bin(tid.size());
+ pk.pack_bin_body((const char*)tid.data(), tid.size());
+ pk.pack(std::string("y")); pk.pack(std::string("q"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ auto req = std::make_shared<Request>(MessageType::FindNode, tid.toInt(), n,
+ Blob(buffer.data(), buffer.data() + buffer.size()),
+ [=](const Request& req_status, ParsedMessage&& msg) { /* on done */
+ if (on_done) {
+ on_done(req_status, {std::forward<ParsedMessage>(msg)});
+ }
+ },
+ [=](const Request& req_status, bool done) { /* on expired */
+ if (on_expired) {
+ on_expired(req_status, done);
+ }
+ }
+ );
+ sendRequest(req);
+ ++out_stats.find;
+ return req;
+}
+
+
+Sp<Request>
+NetworkEngine::sendGetValues(Sp<Node> n, const InfoHash& info_hash, const Query& query, want_t want,
+        RequestCb&& on_done, RequestExpiredCb&& on_expired) {
+    // Sends a "get" request for values stored under `info_hash` at node `n`.
+    // An optional `query` narrows which values the peer should return;
+    // `want` asks for IPv4/IPv6 node lists in the reply.
+    TransId tid (n->getNewTid());
+    msgpack::sbuffer buffer;
+    msgpack::packer<msgpack::sbuffer> pk(&buffer);
+    pk.pack_map(5+(network?1:0));
+
+    // BUG FIX: the "q" (query) argument was previously packed
+    // unconditionally while the announced map size only counted it when the
+    // query carries a filter or a field selection, producing a malformed
+    // msgpack map (size N, N+1 entries) for empty queries. Pack it under the
+    // exact same condition used for the size (same pattern as sendListen).
+    auto has_query = query.where.getFilter() or not query.select.getSelection().empty();
+    pk.pack(std::string("a")); pk.pack_map(2 + (has_query ? 1:0) + (want>0?1:0));
+    pk.pack(std::string("id")); pk.pack(myid);
+    pk.pack(std::string("h")); pk.pack(info_hash);
+    if (has_query) {
+        pk.pack(std::string("q")); pk.pack(query);
+    }
+    if (want > 0) {
+        pk.pack(std::string("w"));
+        pk.pack_array(((want & WANT4)?1:0) + ((want & WANT6)?1:0));
+        if (want & WANT4) pk.pack(AF_INET);
+        if (want & WANT6) pk.pack(AF_INET6);
+    }
+
+    // Envelope: query name, transaction id, type, version, optional network.
+    pk.pack(std::string("q")); pk.pack(std::string("get"));
+    pk.pack(std::string("t")); pk.pack_bin(tid.size());
+    pk.pack_bin_body((const char*)tid.data(), tid.size());
+    pk.pack(std::string("y")); pk.pack(std::string("q"));
+    pk.pack(std::string("v")); pk.pack(my_v);
+    if (network) {
+        pk.pack(std::string("n")); pk.pack(network);
+    }
+
+    auto req = std::make_shared<Request>(MessageType::GetValues, tid.toInt(), n,
+        Blob(buffer.data(), buffer.data() + buffer.size()),
+        [=](const Request& req_status, ParsedMessage&& msg) { /* on done */
+            if (on_done) {
+                on_done(req_status, {std::forward<ParsedMessage>(msg)});
+            }
+        },
+        [=](const Request& req_status, bool done) { /* on expired */
+            if (on_expired) {
+                on_expired(req_status, done);
+            }
+        }
+    );
+    sendRequest(req);
+    ++out_stats.get;
+    return req;
+}
+
+SockAddr deserializeIPv4(const uint8_t* ni) {
+    // Rebuild an IPv4 SockAddr from 6 packed bytes: 4 address bytes then a
+    // 2-byte port, both already in network byte order.
+    SockAddr out;
+    out.setFamily(AF_INET);
+    auto& sa = out.getIPv4();
+    std::memcpy(&sa.sin_addr, ni, 4);
+    std::memcpy(&sa.sin_port, ni + 4, 2);
+    return out;
+}
+SockAddr deserializeIPv6(const uint8_t* ni) {
+    // Rebuild an IPv6 SockAddr from 18 packed bytes: 16 address bytes then
+    // a 2-byte port, both already in network byte order.
+    SockAddr out;
+    out.setFamily(AF_INET6);
+    auto& sa6 = out.getIPv6();
+    std::memcpy(&sa6.sin6_addr, ni, 16);
+    std::memcpy(&sa6.sin6_port, ni + 16, 2);
+    return out;
+}
+
+void
+NetworkEngine::deserializeNodes(ParsedMessage& msg, const SockAddr& from) {
+ // Parses the compact node lists (n4/n6) of a reply into msg.nodes4/6.
+ // Each entry is a fixed-size record: 20-byte id + address + port.
+ if (msg.nodes4_raw.size() % NODE4_INFO_BUF_LEN != 0 || msg.nodes6_raw.size() % NODE6_INFO_BUF_LEN != 0) {
+ throw DhtProtocolException {DhtProtocolException::WRONG_NODE_INFO_BUF_LEN};
+ }
+ // deserialize nodes
+ const auto& now = scheduler.time();
+ for (unsigned i = 0, n = msg.nodes4_raw.size() / NODE4_INFO_BUF_LEN; i < n; i++) {
+ const uint8_t* ni = msg.nodes4_raw.data() + i * NODE4_INFO_BUF_LEN;
+ // NOTE(review): reads the id in place via reinterpret_cast; assumes
+ // InfoHash is a plain byte array with no alignment requirement — confirm.
+ const auto& ni_id = *reinterpret_cast<const InfoHash*>(ni);
+ if (ni_id == myid)
+ continue;
+ SockAddr addr = deserializeIPv4(ni + ni_id.size());
+ // A loopback address from a remote peer refers to the peer itself:
+ // substitute the sender's address, keeping the advertised port.
+ if (addr.isLoopback() and from.getFamily() == AF_INET) {
+ auto port = addr.getPort();
+ addr = from;
+ addr.setPort(port);
+ }
+ if (isMartian(addr) || isNodeBlacklisted(addr))
+ continue;
+ msg.nodes4.emplace_back(cache.getNode(ni_id, addr, now, false));
+ onNewNode(msg.nodes4.back(), 0);
+ }
+ // Same procedure for the IPv6 list.
+ for (unsigned i = 0, n = msg.nodes6_raw.size() / NODE6_INFO_BUF_LEN; i < n; i++) {
+ const uint8_t* ni = msg.nodes6_raw.data() + i * NODE6_INFO_BUF_LEN;
+ const auto& ni_id = *reinterpret_cast<const InfoHash*>(ni);
+ if (ni_id == myid)
+ continue;
+ SockAddr addr = deserializeIPv6(ni + ni_id.size());
+ if (addr.isLoopback() and from.getFamily() == AF_INET6) {
+ auto port = addr.getPort();
+ addr = from;
+ addr.setPort(port);
+ }
+ if (isMartian(addr) || isNodeBlacklisted(addr))
+ continue;
+ msg.nodes6.emplace_back(cache.getNode(ni_id, addr, now, false));
+ onNewNode(msg.nodes6.back(), 0);
+ }
+}
+
+std::vector<Blob>
+NetworkEngine::packValueHeader(msgpack::sbuffer& buffer, const std::vector<Sp<Value>>& st)
+{
+ // Packs the "values" entry for a reply. If everything fits in one UDP
+ // packet, the pre-serialized values are appended raw (each Blob is a
+ // complete msgpack object, so raw bytes after pack_array form valid array
+ // elements) and an empty vector is returned. Otherwise only the value
+ // sizes are packed here, and the returned blobs must be delivered
+ // separately via sendValueParts().
+ auto svals = serializeValues(st);
+ size_t total_size = 0;
+ for (const auto& v : svals)
+ total_size += v.size();
+
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack(std::string("values"));
+ pk.pack_array(svals.size());
+ // try to put everything in a single UDP packet
+ if (svals.size() < 50 && total_size < MAX_PACKET_VALUE_SIZE) {
+ for (const auto& b : svals)
+ buffer.write((const char*)b.data(), b.size());
+ DHT_LOG.d("sending %lu bytes of values", total_size);
+ svals.clear();
+ } else {
+ for (const auto& b : svals)
+ pk.pack(b.size());
+ }
+ return svals;
+}
+
+void
+NetworkEngine::sendValueParts(const TransId& tid, const std::vector<Blob>& svals, const SockAddr& addr)
+{
+ // Streams large values to `addr` as a sequence of "v"-type messages.
+ // Each packet carries one MTU-sized chunk of value #i as a partial map:
+ // { i: { "o": offset, "d": bytes } }, tagged with the originating tid so
+ // the receiver can reassemble.
+ msgpack::sbuffer buffer;
+ unsigned i=0;
+ for (const auto& v: svals) {
+ size_t start {0}, end;
+ do {
+ end = std::min(start + MTU, v.size());
+ buffer.clear();
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(3+(network?1:0));
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+ pk.pack(std::string("y")); pk.pack(std::string("v"));
+ pk.pack(std::string("t")); pk.pack_bin(tid.size());
+ pk.pack_bin_body((const char*)tid.data(), tid.size());
+ pk.pack(std::string("p")); pk.pack_map(1);
+ pk.pack(i); pk.pack_map(2);
+ pk.pack(std::string("o")); pk.pack(start);
+ pk.pack(std::string("d")); pk.pack_bin(end-start);
+ pk.pack_bin_body((const char*)v.data()+start, end-start);
+ send(buffer.data(), buffer.size(), 0, addr);
+ start = end;
+ } while (start != v.size());
+ i++;
+ }
+}
+
+void
+NetworkEngine::sendNodesValues(const SockAddr& addr, Tid tid, const Blob& nodes, const Blob& nodes6,
+        const std::vector<Sp<Value>>& st, const Query& query, const Blob& token)
+{
+ // Builds the response to a "find"/"get" request: closest nodes (n4/n6),
+ // the write token, and any matching values — either complete values, or
+ // selected fields only when the query requests a field selection.
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(4+(network?1:0));
+
+ pk.pack(std::string("r"));
+ // The "r" map size must mirror exactly which optional entries follow.
+ pk.pack_map(2 + (not st.empty()?1:0) + (nodes.size()>0?1:0) + (nodes6.size()>0?1:0) + (not token.empty()?1:0));
+ pk.pack(std::string("id")); pk.pack(myid);
+ insertAddr(pk, addr);
+ if (nodes.size() > 0) {
+ pk.pack(std::string("n4"));
+ pk.pack_bin(nodes.size());
+ pk.pack_bin_body((const char*)nodes.data(), nodes.size());
+ }
+ if (nodes6.size() > 0) {
+ pk.pack(std::string("n6"));
+ pk.pack_bin(nodes6.size());
+ pk.pack_bin_body((const char*)nodes6.data(), nodes6.size());
+ }
+ if (not token.empty()) {
+ pk.pack(std::string("token")); packToken(pk, token);
+ }
+ // Values too large for this packet are returned by packValueHeader and
+ // streamed separately after the main response (see below).
+ std::vector<Blob> svals {};
+ if (not st.empty()) { /* pack complete values */
+ auto fields = query.select.getSelection();
+ if (fields.empty()) {
+ svals = packValueHeader(buffer, st);
+ } else { /* pack fields */
+ pk.pack(std::string("fields"));
+ pk.pack_map(2);
+ pk.pack(std::string("f")); pk.pack(fields);
+ pk.pack(std::string("v")); pk.pack_array(st.size()*fields.size());
+ for (const auto& v : st)
+ v->msgpack_pack_fields(fields, pk);
+ }
+ }
+
+ TransId t (tid);
+ pk.pack(std::string("t")); pk.pack_bin(t.size());
+ pk.pack_bin_body((const char*)t.data(), t.size());
+ pk.pack(std::string("y")); pk.pack(std::string("r"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ // send response
+ send(buffer.data(), buffer.size(), 0, addr);
+
+ // send parts
+ if (not svals.empty())
+ sendValueParts(tid, svals, addr);
+}
+
+Blob
+NetworkEngine::bufferNodes(sa_family_t af, const InfoHash& id, std::vector<Sp<Node>>& nodes)
+{
+ // Serializes up to SEND_NODES nodes, closest to `id` first, into the
+ // compact wire format: 20-byte id, then raw address bytes, then port
+ // (network byte order). Sorts the input vector in place.
+ std::sort(nodes.begin(), nodes.end(), [&](const Sp<Node>& a, const Sp<Node>& b){
+ return id.xorCmp(a->id, b->id) < 0;
+ });
+ size_t nnode = std::min<size_t>(SEND_NODES, nodes.size());
+ Blob bnodes;
+ if (af == AF_INET) {
+ bnodes.resize(NODE4_INFO_BUF_LEN * nnode);
+ for (size_t i=0; i<nnode; i++) {
+ const Node& n = *nodes[i];
+ const auto& sin = n.getAddr().getIPv4();
+ auto dest = bnodes.data() + NODE4_INFO_BUF_LEN * i;
+ memcpy(dest, n.id.data(), HASH_LEN);
+ memcpy(dest + HASH_LEN, &sin.sin_addr, sizeof(in_addr));
+ memcpy(dest + HASH_LEN + sizeof(in_addr), &sin.sin_port, sizeof(in_port_t));
+ }
+ } else if (af == AF_INET6) {
+ bnodes.resize(NODE6_INFO_BUF_LEN * nnode);
+ for (size_t i=0; i<nnode; i++) {
+ const Node& n = *nodes[i];
+ const auto& sin6 = n.getAddr().getIPv6();
+ auto dest = bnodes.data() + NODE6_INFO_BUF_LEN * i;
+ memcpy(dest, n.id.data(), HASH_LEN);
+ memcpy(dest + HASH_LEN, &sin6.sin6_addr, sizeof(in6_addr));
+ memcpy(dest + HASH_LEN + sizeof(in6_addr), &sin6.sin6_port, sizeof(in_port_t));
+ }
+ }
+ // Any other family yields an empty blob.
+ return bnodes;
+}
+
+std::pair<Blob, Blob>
+NetworkEngine::bufferNodes(sa_family_t af, const InfoHash& id, want_t want,
+        std::vector<Sp<Node>>& nodes4, std::vector<Sp<Node>>& nodes6)
+{
+    // Serialize the IPv4 and/or IPv6 node lists requested by `want`.
+    // A negative `want` defaults to the requester's own address family.
+    if (want < 0)
+        want = (af == AF_INET) ? WANT4 : WANT6;
+
+    Blob b4 = (want & WANT4) ? bufferNodes(AF_INET, id, nodes4) : Blob{};
+    Blob b6 = (want & WANT6) ? bufferNodes(AF_INET6, id, nodes6) : Blob{};
+    return {std::move(b4), std::move(b6)};
+}
+
+Sp<Request>
+NetworkEngine::sendListen(Sp<Node> n,
+        const InfoHash& hash,
+        const Query& query,
+        const Blob& token,
+        Sp<Request> previous,
+        RequestCb&& on_done,
+        RequestExpiredCb&& on_expired,
+        SocketCb&& socket_cb)
+{
+ // Sends a "listen" request for `hash` to node `n`. When refreshing an
+ // existing listen (`previous` targets the same node) the previous
+ // long-lived socket id is reused; otherwise a new socket is opened and
+ // `socket_cb` will receive the pushed value updates.
+ Tid socket;
+ TransId tid (n->getNewTid());
+ if (previous and previous->node == n) {
+ socket = previous->getSocket();
+ } else {
+ if (previous)
+ DHT_LOG.e(hash, "[node %s] trying refresh listen contract with wrong node", previous->node->toString().c_str());
+ socket = n->openSocket(std::move(socket_cb));
+ }
+
+ // Tid 0 means no socket could be obtained: abort.
+ if (not socket) {
+ DHT_LOG.e(hash, "[node %s] unable to get a valid socket for listen. Aborting listen", n->toString().c_str());
+ return {};
+ }
+ TransId sid(socket);
+
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(5+(network?1:0));
+
+ // The "q" (query) argument is optional; the map size accounts for it.
+ auto has_query = query.where.getFilter() or not query.select.getSelection().empty();
+ pk.pack(std::string("a")); pk.pack_map(4 + has_query);
+ pk.pack(std::string("id")); pk.pack(myid);
+ pk.pack(std::string("h")); pk.pack(hash);
+ pk.pack(std::string("token")); packToken(pk, token);
+ pk.pack(std::string("sid")); pk.pack_bin(sid.size());
+ pk.pack_bin_body((const char*)sid.data(), sid.size());
+ if (has_query) {
+ pk.pack(std::string("q")); pk.pack(query);
+ }
+
+ pk.pack(std::string("q")); pk.pack(std::string("listen"));
+ pk.pack(std::string("t")); pk.pack_bin(tid.size());
+ pk.pack_bin_body((const char*)tid.data(), tid.size());
+ pk.pack(std::string("y")); pk.pack(std::string("q"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ auto req = std::make_shared<Request>(MessageType::Listen, tid.toInt(), n,
+ Blob(buffer.data(), buffer.data() + buffer.size()),
+ [=](const Request& req_status, ParsedMessage&& msg) { /* on done */
+ if (on_done)
+ on_done(req_status, {std::forward<ParsedMessage>(msg)});
+ },
+ [=](const Request& req_status, bool done) { /* on expired */
+ if (on_expired)
+ on_expired(req_status, done);
+ },
+ socket
+ );
+ sendRequest(req);
+ ++out_stats.listen;
+ return req;
+}
+
+void
+NetworkEngine::sendListenConfirmation(const SockAddr& addr, Tid tid) {
+ // Acknowledges a "listen" request: a plain "r" response echoing the
+ // transaction id and the peer's observed address.
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(4+(network?1:0));
+
+ pk.pack(std::string("r")); pk.pack_map(2);
+ pk.pack(std::string("id")); pk.pack(myid);
+ insertAddr(pk, addr);
+
+ TransId t (tid);
+ pk.pack(std::string("t")); pk.pack_bin(t.size());
+ pk.pack_bin_body((const char*)t.data(), t.size());
+ pk.pack(std::string("y")); pk.pack(std::string("r"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ send(buffer.data(), buffer.size(), 0, addr);
+}
+
+Sp<Request>
+NetworkEngine::sendAnnounceValue(Sp<Node> n,
+        const InfoHash& infohash,
+        const Sp<Value>& value,
+        time_point created,
+        const Blob& token,
+        RequestCb&& on_done,
+        RequestExpiredCb&& on_expired)
+{
+ // Sends a "put" request storing `value` under `infohash` at node `n`,
+ // using the write `token` previously obtained from that node. A creation
+ // date in the past is transmitted ("c") so the peer can age the value.
+ TransId tid (n->getNewTid());
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(5+(network?1:0));
+
+ // NOTE(review): scheduler.time() is read twice for the same condition;
+ // presumably it returns a cached tick so both reads agree — confirm.
+ pk.pack(std::string("a")); pk.pack_map((created < scheduler.time() ? 5 : 4));
+ pk.pack(std::string("id")); pk.pack(myid);
+ pk.pack(std::string("h")); pk.pack(infohash);
+ // Large values are not inlined: packValueHeader returns the blobs to be
+ // streamed with sendValueParts after the request is sent (see below).
+ auto v = packValueHeader(buffer, {value});
+ if (created < scheduler.time()) {
+ pk.pack(std::string("c"));
+ pk.pack(to_time_t(created));
+ }
+ pk.pack(std::string("token")); pk.pack(token);
+
+ pk.pack(std::string("q")); pk.pack(std::string("put"));
+ pk.pack(std::string("t")); pk.pack_bin(tid.size());
+ pk.pack_bin_body((const char*)tid.data(), tid.size());
+ pk.pack(std::string("y")); pk.pack(std::string("q"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ auto req = std::make_shared<Request>(MessageType::AnnounceValue, tid.toInt(), n,
+ Blob(buffer.data(), buffer.data() + buffer.size()),
+ [=](const Request& req_status, ParsedMessage&& msg) { /* on done */
+ // A response without a value id means the peer did not store it.
+ if (msg.value_id == Value::INVALID_ID) {
+ DHT_LOG.d(infohash, "Unknown search or announce!");
+ } else {
+ if (on_done) {
+ RequestAnswer answer {};
+ answer.vid = msg.value_id;
+ on_done(req_status, std::move(answer));
+ }
+ }
+ },
+ [=](const Request& req_status, bool done) { /* on expired */
+ if (on_expired) {
+ on_expired(req_status, done);
+ }
+ }
+ );
+ sendRequest(req);
+ if (not v.empty())
+ sendValueParts(tid, v, n->getAddr());
+ ++out_stats.put;
+ return req;
+}
+
+Sp<Request>
+NetworkEngine::sendRefreshValue(Sp<Node> n,
+        const InfoHash& infohash,
+        const Value::Id& vid,
+        const Blob& token,
+        RequestCb&& on_done,
+        RequestExpiredCb&& on_expired)
+{
+ // Sends a "refresh" request asking node `n` to extend the lifetime of
+ // the value `vid` stored under `infohash`, authorized by `token`.
+ TransId tid (n->getNewTid());
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(5+(network?1:0));
+
+ pk.pack(std::string("a")); pk.pack_map(4);
+ pk.pack(std::string("id")); pk.pack(myid);
+ pk.pack(std::string("h")); pk.pack(infohash);
+ pk.pack(std::string("vid")); pk.pack(vid);
+ pk.pack(std::string("token")); pk.pack(token);
+
+ pk.pack(std::string("q")); pk.pack(std::string("refresh"));
+ pk.pack(std::string("t")); pk.pack_bin(tid.size());
+ pk.pack_bin_body((const char*)tid.data(), tid.size());
+ pk.pack(std::string("y")); pk.pack(std::string("q"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ auto req = std::make_shared<Request>(MessageType::Refresh, tid.toInt(), n,
+ Blob(buffer.data(), buffer.data() + buffer.size()),
+ [=](const Request& req_status, ParsedMessage&& msg) { /* on done */
+ // A response without a value id means the peer doesn't know the value.
+ if (msg.value_id == Value::INVALID_ID) {
+ DHT_LOG.d(infohash, "Unknown search or announce!");
+ } else {
+ if (on_done) {
+ RequestAnswer answer {};
+ answer.vid = msg.value_id;
+ on_done(req_status, std::move(answer));
+ }
+ }
+ },
+ [=](const Request& req_status, bool done) { /* on expired */
+ if (on_expired) {
+ on_expired(req_status, done);
+ }
+ }
+ );
+ sendRequest(req);
+ ++out_stats.refresh;
+ return req;
+}
+
+void
+NetworkEngine::sendValueAnnounced(const SockAddr& addr, Tid tid, Value::Id vid) {
+ // Acknowledges a "put": an "r" response carrying the stored value id so
+ // the announcer can match it against its request.
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(4+(network?1:0));
+
+ pk.pack(std::string("r")); pk.pack_map(3);
+ pk.pack(std::string("id")); pk.pack(myid);
+ pk.pack(std::string("vid")); pk.pack(vid);
+ insertAddr(pk, addr);
+
+ TransId t(tid);
+ pk.pack(std::string("t")); pk.pack_bin(t.size());
+ pk.pack_bin_body((const char*)t.data(), t.size());
+ pk.pack(std::string("y")); pk.pack(std::string("r"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ send(buffer.data(), buffer.size(), 0, addr);
+}
+
+void
+NetworkEngine::sendError(const SockAddr& addr,
+        Tid tid,
+        uint16_t code,
+        const std::string& message,
+        bool include_id)
+{
+ // Sends an "e" (error) message: [code, message] plus the echoed
+ // transaction id. `include_id` additionally attaches our node id in an
+ // "r" map so the peer can attribute the error.
+ msgpack::sbuffer buffer;
+ msgpack::packer<msgpack::sbuffer> pk(&buffer);
+ pk.pack_map(4 + (include_id?1:0));
+
+ pk.pack(std::string("e")); pk.pack_array(2);
+ pk.pack(code);
+ pk.pack(message);
+
+ if (include_id) {
+ pk.pack(std::string("r")); pk.pack_map(1);
+ pk.pack(std::string("id")); pk.pack(myid);
+ }
+
+ TransId t(tid);
+ pk.pack(std::string("t")); pk.pack_bin(t.size());
+ pk.pack_bin_body((const char*)t.data(), t.size());
+ pk.pack(std::string("y")); pk.pack(std::string("e"));
+ pk.pack(std::string("v")); pk.pack(my_v);
+ if (network) {
+ pk.pack(std::string("n")); pk.pack(network);
+ }
+
+ send(buffer.data(), buffer.size(), 0, addr);
+}
+
+void
+NetworkEngine::maintainRxBuffer(Tid tid)
+{
+    // Drop the partial (multi-part) message registered under `tid` if it
+    // either exceeded its total reassembly window or stalled between parts.
+    auto found = partial_messages.find(tid);
+    if (found == partial_messages.end())
+        return;
+    const auto& now = scheduler.time();
+    const bool too_old = found->second.start + RX_MAX_PACKET_TIME < now;
+    const bool stalled = found->second.last_part + RX_TIMEOUT < now;
+    if (too_old or stalled) {
+        DHT_LOG.w("Dropping expired partial message from %s", found->second.from.toString().c_str());
+        partial_messages.erase(found);
+    }
+}
+
+
+} /* namespace net */
+} /* namespace dht */
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+
+#include "node.h"
+#include "request.h"
+#include "rng.h"
+
+#include <sstream>
+
+namespace dht {
+
+constexpr std::chrono::minutes Node::NODE_EXPIRE_TIME;
+constexpr std::chrono::minutes Node::NODE_GOOD_TIME;
+constexpr std::chrono::seconds Node::MAX_RESPONSE_TIME;
+
+Node::Node(const InfoHash& id, const SockAddr& addr, bool client)
+: id(id), addr(addr), is_client(client), sockets_()
+{
+    // Seed the per-node transaction id with a random non-zero value
+    // (the distribution starts at 1; 0 is reserved to mean "no socket").
+    thread_local crypto::random_device rd {};
+    std::uniform_int_distribution<Tid> dist {1};
+    transaction_id = dist(rd);
+}
+
+/* This is our definition of a known-good node. */
+bool
+Node::isGood(time_point now) const
+{
+    // Good = not flagged expired, got a reply recently enough, and had any
+    // activity recently enough.
+    if (expired_)
+        return false;
+    return reply_time >= now - NODE_GOOD_TIME
+        and time >= now - NODE_EXPIRE_TIME;
+}
+
+bool
+Node::isPendingMessage() const
+{
+    // True if any in-flight request to this node is still awaiting a reply.
+    // Iterate by const reference: the previous by-value loop copied each
+    // (Tid, Sp<Request>) pair, incurring shared_ptr refcount churn per element.
+    for (const auto& r : requests_) {
+        if (r.second->pending())
+            return true;
+    }
+    return false;
+}
+
+size_t
+Node::getPendingMessageCount() const
+{
+    // Number of in-flight requests to this node still awaiting a reply.
+    // Iterate by const reference: the previous by-value loop copied each
+    // (Tid, Sp<Request>) pair, incurring shared_ptr refcount churn per element.
+    size_t count {0};
+    for (const auto& r : requests_) {
+        if (r.second->pending())
+            count++;
+    }
+    return count;
+}
+
+void
+Node::update(const SockAddr& new_addr)
+{
+ // Replace the node's known transport address (e.g. after a NAT rebind).
+ addr = new_addr;
+}
+
+/** To be called when a message was sent to the node */
+void
+Node::requested(const Sp<net::Request>& req)
+{
+ // Track the outgoing request by its transaction id.
+ auto e = requests_.emplace(req->getTid(), req);
+ if (not e.second and req != e.first->second) {
+ // Should not happen !
+ // Try to handle this scenario as well as we can:
+ // a different request already uses this tid — expire it and take over.
+ e.first->second->setExpired();
+ e.first->second = req;
+ }
+}
+
+/** To be called when a message was received from the node.
+ Req should be true if the message was an answer to a request we made. */
+void
+Node::received(time_point now, const Sp<net::Request>& req)
+{
+    // Record activity from this node; a non-null `req` means the packet
+    // answered one of our own requests, which also refreshes reply_time.
+    time = now;
+    expired_ = false;
+    if (not req)
+        return;
+    reply_time = now;
+    requests_.erase(req->getTid());
+}
+
+Sp<net::Request>
+Node::getRequest(Tid tid)
+{
+    // Look up a pending request by its transaction id; null when unknown.
+    auto found = requests_.find(tid);
+    if (found == requests_.end())
+        return nullptr;
+    return found->second;
+}
+
+void
+Node::cancelRequest(const Sp<net::Request>& req)
+{
+ // Abort an in-flight request and release its resources. The order
+ // matters: cancel first, then close the socket id the request releases
+ // (req->closeSocket() presumably returns the attached socket's Tid —
+ // confirm against Request), then forget the request entry.
+ if (req) {
+ req->cancel();
+ closeSocket(req->closeSocket());
+ requests_.erase(req->getTid());
+ }
+}
+
+void
+Node::setExpired()
+{
+    // Mark this node as gone: expire every pending request and drop all
+    // open sockets so no further callbacks fire for it.
+    expired_ = true;
+    // Iterate by const reference: the previous by-value loop copied each
+    // (Tid, Sp<Request>) pair, incurring shared_ptr refcount churn per element.
+    for (const auto& r : requests_) {
+        r.second->setExpired();
+    }
+    requests_.clear();
+    sockets_.clear();
+}
+
+Tid
+Node::openSocket(SocketCb&& cb)
+{
+    // Allocate the next socket id, skipping 0 (reserved for "no socket").
+    if (++transaction_id == 0)
+        transaction_id = 1;
+
+    // BUG FIX: the previous code passed the shared_ptr to map::emplace and,
+    // on key collision, fell back to assigning the same local pointer. But
+    // std::map::emplace may construct (and thus move from) the value even
+    // when insertion fails, so the fallback could assign a moved-from (null)
+    // pointer, silently dropping the new socket. operator[] assignment
+    // inserts or replaces without that hazard.
+    sockets_[transaction_id] = std::make_shared<Socket>(std::move(cb));
+    return transaction_id;
+}
+
+Sp<Socket>
+Node::getSocket(Tid id)
+{
+    // Return the open socket registered under `id`, or null when unknown.
+    auto found = sockets_.find(id);
+    if (found == sockets_.end())
+        return nullptr;
+    return found->second;
+}
+
+void
+Node::closeSocket(Tid id)
+{
+    // Tid 0 means "no socket": nothing to close.
+    if (not id)
+        return;
+    sockets_.erase(id);
+}
+
+std::string
+Node::toString() const
+{
+    // Format through operator<< so string and stream output always agree.
+    std::ostringstream out;
+    out << *this;
+    return out.str();
+}
+
+std::ostream& operator<< (std::ostream& s, const Node& h)
+{
+    // "<id> <address>" — same fields, order and separator as before.
+    return s << h.id << " " << h.addr.toString();
+}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "node_cache.h"
+
+namespace dht {
+
+constexpr size_t CLEANUP_MAX_NODES {1024};
+constexpr size_t CLEANUP_FREQ {1024};
+
+NodeCache::~NodeCache()
+{
+ // Flag every cached node (both families) as expired so holders of
+ // outstanding shared_ptrs stop treating them as live.
+ cache_4.setExpired();
+ cache_6.setExpired();
+}
+
+Sp<Node>
+NodeCache::getNode(const InfoHash& id, sa_family_t family) {
+ // Look up a cached node by id in the family-specific map; null if absent.
+ return cache(family).getNode(id);
+}
+
+Sp<Node>
+NodeCache::getNode(const InfoHash& id, const SockAddr& addr, time_point now, bool confirm, bool client) {
+    // A zero id cannot be cached: hand back a throwaway node object.
+    if (id)
+        return cache(addr.getFamily()).getNode(id, addr, now, confirm, client);
+    return std::make_shared<Node>(id, addr);
+}
+
+std::vector<Sp<Node>>
+NodeCache::getCachedNodes(const InfoHash& id, sa_family_t sa_f, size_t count) const
+{
+ // Delegate to the family-specific map's closest-nodes lookup.
+ return cache(sa_f).getCachedNodes(id, count);
+}
+
+std::vector<Sp<Node>>
+NodeCache::NodeMap::getCachedNodes(const InfoHash& id, size_t count) const
+{
+ // Collect up to `count` live, non-client nodes closest to `id` by XOR
+ // distance, walking outward from id's position in the sorted map with
+ // two iterators (it_p backwards, it_n forwards). cend() doubles as the
+ // "this direction is exhausted" sentinel for the backward iterator.
+ std::vector<Sp<Node>> nodes;
+ nodes.reserve(std::min(size(), count));
+ const_iterator it;
+ // Post-decrement helper: returns the current position and steps `it`
+ // backwards, parking it at cend() once cbegin() has been consumed.
+ auto dec_it = [this](const_iterator& it) {
+ auto ret = it;
+ it = (it == cbegin()) ? cend() : std::prev(it);
+ return ret;
+ };
+
+ auto it_p = lower_bound(id),
+ it_n = it_p;
+ if (not empty())
+ dec_it(it_p); /* Create 2 separate iterators if we can */
+
+ while (nodes.size() < count and (it_n != cend() or it_p != cend())) {
+ /* If one of the iterators is at the end, take the other one.
+ If both are somewhere in the middle, compare both and take
+ the one closest to the id. */
+ if (it_p == cend()) it = it_n++;
+ else if (it_n == cend()) it = dec_it(it_p);
+ else it = id.xorCmp(it_p->first, it_n->first) < 0 ? dec_it(it_p) : it_n++;
+
+ // Entries hold weak_ptrs: skip dead, expired, or client-only nodes.
+ if (auto n = it->second.lock())
+ if ( not n->isExpired() and not n->isClient() )
+ nodes.emplace_back(std::move(n));
+ }
+
+ return nodes;
+}
+
+void
+NodeCache::clearBadNodes(sa_family_t family)
+{
+    // Family 0 acts as a wildcard covering both caches.
+    if (family == 0) {
+        clearBadNodes(AF_INET);
+        clearBadNodes(AF_INET6);
+        return;
+    }
+    cache(family).clearBadNodes();
+}
+
+Sp<Node>
+NodeCache::NodeMap::getNode(const InfoHash& id)
+{
+    // Entries hold weak_ptrs that may outlive their node; prune dead ones
+    // lazily on lookup.
+    auto entry = find(id);
+    if (entry == end())
+        return {};
+    if (auto node = entry->second.lock())
+        return node;
+    erase(entry);
+    return {};
+}
+
+Sp<Node>
+NodeCache::NodeMap::getNode(const InfoHash& id, const SockAddr& addr, time_point now, bool confirm, bool client)
+{
+ // Get-or-create: reuse the cached node when its weak_ptr is still alive,
+ // otherwise create a fresh Node and (re)bind the map entry to it.
+ auto it = emplace(id, std::weak_ptr<Node>{});
+ auto node = it.first->second.lock();
+ if (not node) {
+ node = std::make_shared<Node>(id, addr, client);
+ it.first->second = node;
+ // Periodically sweep dead weak_ptr entries; the == test fires once
+ // every CLEANUP_FREQ+1 insertions before the counter resets.
+ if (cleanup_counter++ == CLEANUP_FREQ) {
+ cleanup();
+ cleanup_counter = 0;
+ }
+ } else if (confirm or node->isOld(now)) {
+ // Refresh the known address only when the caller vouches for it
+ // (confirm) or the cached one has gone stale.
+ node->update(addr);
+ }
+ return node;
+}
+
+void
+NodeCache::NodeMap::clearBadNodes() {
+ // Reset the state of every live node and drop dead weak_ptr entries.
+ // Uses the erase(it++) idiom so iteration survives removal.
+ for (auto it = cbegin(); it != cend();) {
+ if (auto n = it->second.lock()) {
+ n->reset();
+ ++it;
+ } else {
+ erase(it++);
+ }
+ }
+ cleanup_counter = 0;
+}
+
+void
+NodeCache::NodeMap::setExpired() {
+    // Flag every still-alive node as expired, then drop all map entries.
+    for (auto& entry : *this) {
+        if (auto node = entry.second.lock())
+            node->setExpired();
+    }
+    clear();
+    cleanup_counter = 0;
+}
+
+void
+NodeCache::NodeMap::cleanup()
+{
+ // Sweep up to CLEANUP_MAX_NODES entries for dead weak_ptrs, starting at
+ // a random key and wrapping around so successive sweeps cover different
+ // regions of the map.
+ auto it = lower_bound(InfoHash::getRandom());
+ for (size_t n = 0, maxNodes = std::min(size(), CLEANUP_MAX_NODES); n != maxNodes; n++) {
+ if (it == end())
+ it = begin();
+ if (it->second.expired())
+ erase(it++);
+ else
+ ++it;
+ }
+}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+#include "op_cache.h"
+
+namespace dht {
+
+constexpr const std::chrono::seconds OpCache::EXPIRATION;
+
+bool
+OpValueCache::onValuesAdded(const std::vector<Sp<Value>>& vals) {
+ // Merge incoming values into the cache. Values already present only get
+ // their refcount bumped (several underlying listens can report the same
+ // value); genuinely new ones are forwarded to the user callback.
+ std::vector<Sp<Value>> newValues;
+ for (const auto& v : vals) {
+ auto viop = values.emplace(v->id, OpCacheValueStorage{v});
+ if (viop.second) {
+ newValues.emplace_back(v);
+ } else {
+ viop.first->second.refCount++;
+ }
+ }
+ // Returning the callback's result lets it cancel the listen; with
+ // nothing new to report, keep listening (true).
+ return newValues.empty() ? true : callback(newValues, false);
+}
+bool
+OpValueCache::onValuesExpired(const std::vector<Sp<Value>>& vals) {
+ // Decrement refcounts for expiring values; only values whose count drops
+ // to zero are truly gone and reported to the user callback as expired.
+ std::vector<Sp<Value>> expiredValues;
+ for (const auto& v : vals) {
+ auto vit = values.find(v->id);
+ if (vit != values.end()) {
+ vit->second.refCount--;
+ if (not vit->second.refCount) {
+ expiredValues.emplace_back(std::move(vit->second.data));
+ values.erase(vit);
+ }
+ }
+ }
+ // With nothing fully expired, keep listening (true).
+ return expiredValues.empty() ? true : callback(expiredValues, true);
+}
+std::vector<Sp<Value>>
+OpValueCache::get(Value::Filter& filter) const {
+    // Snapshot the cached values passing `filter`; an empty filter passes all.
+    std::vector<Sp<Value>> out;
+    if (not filter)
+        out.reserve(values.size());
+    for (const auto& entry : values) {
+        if (filter and not filter(*entry.second.data))
+            continue;
+        out.emplace_back(entry.second.data);
+    }
+    return out;
+}
+
+Sp<Value>
+OpValueCache::get(Value::Id id) const {
+    // Look up one cached value by id; null when absent.
+    auto found = values.find(id);
+    return found != values.end() ? found->second.data : Sp<Value>{};
+}
+
+std::vector<Sp<Value>>
+OpValueCache::getValues() const {
+    // Snapshot of every cached value, unfiltered.
+    std::vector<Sp<Value>> out;
+    out.reserve(values.size());
+    for (const auto& entry : values)
+        out.emplace_back(entry.second.data);
+    return out;
+}
+
+void
+OpCache::onValuesAdded(const std::vector<Sp<Value>>& vals) {
+ // Fan the new values out to every registered listener, each through its
+ // own filter. Listeners are copied first — presumably so a callback may
+ // add/remove listeners without invalidating the iteration; confirm.
+ if (not listeners.empty()) {
+ std::vector<LocalListener> list;
+ list.reserve(listeners.size());
+ for (const auto& l : listeners)
+ list.emplace_back(l.second);
+ for (auto& l : list)
+ l.get_cb(l.filter.filter(vals), false);
+ }
+}
+
+void
+OpCache::onValuesExpired(const std::vector<Sp<Value>>& vals) {
+ // Same fan-out as onValuesAdded, but flags the values as expired (true).
+ if (not listeners.empty()) {
+ std::vector<LocalListener> list;
+ list.reserve(listeners.size());
+ for (const auto& l : listeners)
+ list.emplace_back(l.second);
+ for (auto& l : list)
+ l.get_cb(l.filter.filter(vals), true);
+ }
+}
+
+time_point
+OpCache::getExpiration() const {
+    // An op with active listeners never expires; otherwise it lingers for
+    // EXPIRATION past the last listener removal so it can be reused.
+    return listeners.empty() ? lastRemoved + EXPIRATION : time_point::max();
+}
+
+size_t
+SearchCache::listen(ValueCallback get_cb, Sp<Query> q, Value::Filter filter, std::function<size_t(Sp<Query>, ValueCallback)> onListen)
+{
+ // Registers a local listener, reusing an existing network listen when one
+ // already satisfies the query; otherwise starts a new one via onListen.
+ // Returns a non-zero local token, or 0 on failure.
+ // find exact match
+ auto op = ops.find(q);
+ if (op == ops.end()) {
+ // find satisfying query
+ for (auto it = ops.begin(); it != ops.end(); it++) {
+ if (q->isSatisfiedBy(*it->first)) {
+ op = it;
+ break;
+ }
+ }
+ }
+ if (op == ops.end()) {
+ // New query
+ op = ops.emplace(q, std::unique_ptr<OpCache>(new OpCache)).first;
+ // The lambda captures the OpCache by reference; the object lives in
+ // `ops` behind a unique_ptr, so the reference stays valid until the
+ // op is erased (which also cancels the underlying listen).
+ auto& cache = *op->second;
+ cache.searchToken = onListen(q, [&](const std::vector<Sp<Value>>& values, bool expired){
+ return cache.onValue(values, expired);
+ });
+ }
+ // Hand out the next local token, skipping 0 (reserved for "invalid").
+ auto token = nextToken_++;
+ if (nextToken_ == 0)
+ nextToken_++;
+ return op->second->addListener(token, get_cb, q, filter) ? token : 0;
+}
+
+bool
+SearchCache::cancelListen(size_t gtoken, const time_point& now) {
+ // Removes the local listener identified by `gtoken` from whichever op it
+ // was registered on, and pulls the next expiration forward if the op is
+ // now idle. Returns false when the token is unknown.
+ for (auto& op : ops) {
+ if (op.second->removeListener(gtoken, now)) {
+ nextExpiration_ = std::min(nextExpiration_, op.second->getExpiration());
+ return true;
+ }
+ }
+ return false;
+}
+
+void
+SearchCache::cancelAll(std::function<void(size_t)> onCancel) {
+ // Tears down every op: detach all listeners, then report each network
+ // search token through onCancel so the caller can stop the listens.
+ for (auto& op : ops) {
+ // Move the cache out so it stays alive while onCancel runs.
+ auto cache = std::move(op.second);
+ cache->removeAll();
+ onCancel(cache->searchToken);
+ }
+ ops.clear();
+}
+
+time_point
+SearchCache::expire(const time_point& now, std::function<void(size_t)> onCancel) {
+    // Drop every op whose expiration has passed, cancelling its network
+    // listen, and recompute the earliest remaining expiration, which is
+    // also the return value (time_point::max() when nothing remains).
+    nextExpiration_ = time_point::max();
+    for (auto it = ops.begin(); it != ops.end();) {
+        const auto expiration = it->second->getExpiration();
+        if (expiration < now) {
+            // Keep the cache alive past erase() so onCancel can read it.
+            auto expired_op = std::move(it->second);
+            it = ops.erase(it);
+            onCancel(expired_op->searchToken);
+        } else {
+            nextExpiration_ = std::min(nextExpiration_, expiration);
+            ++it;
+        }
+    }
+    return nextExpiration_;
+}
+
+std::vector<Sp<Value>>
+SearchCache::get(Value::Filter& filter) const {
+    // Fast path: a single operation already holds the full result set.
+    if (ops.size() == 1)
+        return ops.begin()->second->get(filter);
+    // Merge values from every operation, deduplicating by value id.
+    std::map<Value::Id, Sp<Value>> merged;
+    for (const auto& entry : ops)
+        for (const auto& value : entry.second->get(filter))
+            merged.emplace(value->id, value);
+    std::vector<Sp<Value>> result;
+    result.reserve(merged.size());
+    for (auto& item : merged)
+        result.emplace_back(std::move(item.second));
+    return result;
+}
+
+Sp<Value>
+SearchCache::get(Value::Id id) const {
+    // Return the first cached value carrying this id, if any operation has it.
+    for (const auto& entry : ops) {
+        if (auto value = entry.second->get(id))
+            return value;
+    }
+    return {};
+}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include "value.h"
+#include "value_cache.h"
+#include "listener.h"
+
+namespace dht {
+
+/** A cached value, together with a reference count
+ *  (presumably the number of sources reporting this value —
+ *  see OpValueCache::onValuesAdded/onValuesExpired; confirm there). */
+struct OpCacheValueStorage
+{
+    Sp<Value> data {};
+    unsigned refCount {1};
+    OpCacheValueStorage(Sp<Value> val = {}) : data(val) {}
+};
+
+/**
+ * Cache of values seen by a single operation, holding per-value
+ * reference-counted storage and a user callback notified of
+ * value additions and expirations.
+ */
+class OpValueCache {
+public:
+    OpValueCache(ValueCallback&& cb) : callback(std::forward<ValueCallback>(cb)) {}
+    OpValueCache(OpValueCache&& o) : values(std::move(o.values)), callback(std::move(o.callback)) {
+        // Explicitly disarm the moved-from cache's callback.
+        o.callback = {};
+    }
+
+    /** Wrap a callback so its results go through a shared cache;
+     *  when the wrapped callback asks to stop (returns false),
+     *  onCancel is invoked. */
+    static ValueCallback cacheCallback(ValueCallback&& cb, std::function<void()>&& onCancel) {
+        auto cache = std::make_shared<OpValueCache>(std::forward<ValueCallback>(cb));
+        return [cache, onCancel](const std::vector<Sp<Value>>& vals, bool expired){
+            auto ret = cache->onValue(vals, expired);
+            if (not ret)
+                onCancel();
+            return ret;
+        };
+    }
+
+    /** Dispatch incoming results to the add or expire path. */
+    bool onValue(const std::vector<Sp<Value>>& vals, bool expired) {
+        if (expired)
+            return onValuesExpired(vals);
+        else
+            return onValuesAdded(vals);
+    }
+
+    bool onValuesAdded(const std::vector<Sp<Value>>& vals);
+    bool onValuesExpired(const std::vector<Sp<Value>>& vals);
+
+    std::vector<Sp<Value>> get(Value::Filter& filter) const;
+    Sp<Value> get(Value::Id id) const;
+    std::vector<Sp<Value>> getValues() const;
+
+private:
+    OpValueCache(const OpValueCache&) = delete;
+    OpValueCache& operator=(const OpValueCache&) = delete;
+
+    // Values indexed by their id, each with its reference count.
+    std::map<Value::Id, OpCacheValueStorage> values {};
+    ValueCallback callback;
+};
+
+/**
+ * A single network-side operation shared by any number of local
+ * listeners, with its cache of received values.
+ */
+class OpCache {
+public:
+    OpCache() : cache([this](const std::vector<Sp<Value>>& vals, bool expired){
+        if (expired)
+            onValuesExpired(vals);
+        else
+            onValuesAdded(vals);
+        return true;
+    }) {}
+
+    /** Feed network results into the cache.
+     *  Returns false once no local listener remains, signaling that the
+     *  network operation can be stopped. */
+    bool onValue(const std::vector<Sp<Value>>& vals, bool expired) {
+        cache.onValue(vals, expired);
+        return not listeners.empty();
+    }
+
+    void onValuesAdded(const std::vector<Sp<Value>>& vals);
+    void onValuesExpired(const std::vector<Sp<Value>>& vals);
+
+    /** Register a local listener and immediately forward currently
+     *  cached values to it. If the callback refuses them (returns
+     *  false) the listener is unregistered again and false is returned. */
+    bool addListener(size_t token, ValueCallback cb, Sp<Query> q, Value::Filter filter) {
+        listeners.emplace(token, LocalListener{q, filter, cb});
+        auto cached = cache.get(filter);
+        if (not cached.empty()) {
+            auto ret = cb(cached, false);
+            if (not ret)
+                listeners.erase(token);
+            return ret;
+        }
+        return true;
+    }
+
+    /** Unregister a listener; returns true if the token was known.
+     *  Note: lastRemoved is refreshed even for unknown tokens. */
+    bool removeListener(size_t token, const time_point& now) {
+        lastRemoved = now;
+        return listeners.erase(token) > 0;
+    }
+
+    void removeAll() {
+        listeners.clear();
+    }
+
+    bool isDone() {
+        return listeners.empty();
+    }
+
+    std::vector<Sp<Value>> get(Value::Filter& filter) const {
+        return cache.get(filter);
+    }
+
+    Sp<Value> get(Value::Id id) const {
+        return cache.get(id);
+    }
+
+    /** True when listener-less for longer than EXPIRATION. */
+    bool isExpired(const time_point& now) const {
+        return listeners.empty() and (lastRemoved + EXPIRATION < now);
+    }
+    time_point getExpiration() const;
+
+    /* Token of the underlying network operation.
+     * Zero-initialized: previously left indeterminate, which was UB to
+     * read if an owner ever skipped assigning it after construction. */
+    size_t searchToken {0};
+private:
+    constexpr static const std::chrono::seconds EXPIRATION {60};
+    OpCache(const OpCache&) = delete;
+    OpCache& operator=(const OpCache&) = delete;
+
+    OpValueCache cache;
+    std::map<size_t, LocalListener> listeners;
+    time_point lastRemoved {clock::now()};
+};
+
+/**
+ * Cache of all "listen" operations for a given search, deduplicating
+ * overlapping queries into shared OpCache operations.
+ */
+class SearchCache {
+public:
+    SearchCache() {}
+    SearchCache(SearchCache&&) = default;
+    /** Register a local listener. Returns a non-zero token on success,
+     *  0 on failure. onListen is invoked to start a new network
+     *  operation when no existing one satisfies the query. */
+    size_t listen(ValueCallback get_cb, Sp<Query> q, Value::Filter filter, std::function<size_t(Sp<Query>, ValueCallback)> onListen);
+
+    /** Remove the listener owning gtoken; returns true if it was found. */
+    bool cancelListen(size_t gtoken, const time_point& now);
+    /** Drop all listeners; onCancel is called with each operation's search token. */
+    void cancelAll(std::function<void(size_t)> onCancel);
+
+    /** Reap operations expired at `now`; returns the next expiration time. */
+    time_point expire(const time_point& now, std::function<void(size_t)> onCancel);
+    time_point getExpiration() const {
+        return nextExpiration_;
+    }
+
+    std::vector<Sp<Value>> get(Value::Filter& filter) const;
+    Sp<Value> get(Value::Id id) const;
+
+private:
+    SearchCache(const SearchCache&) = delete;
+    SearchCache& operator=(const SearchCache&) = delete;
+
+    // One shared network operation per canonical query.
+    std::map<Sp<Query>, std::unique_ptr<OpCache>> ops {};
+    size_t nextToken_ {1}; // 0 is reserved as the "invalid token" value
+    time_point nextExpiration_ {time_point::max()};
+};
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+ #pragma once
+
+#include "infohash.h"
+#include "sockaddr.h"
+#include "net.h"
+
+#include <map>
+
+namespace dht {
+namespace net {
+
+/** Decode a transaction id: either a plain integer, or (legacy format)
+ *  a 4-byte big-endian array. */
+Tid unpackTid(const msgpack::object& o) {
+    switch (o.type) {
+    case msgpack::type::POSITIVE_INTEGER:
+        return o.as<Tid>();
+    default: {
+        // Assemble the big-endian value byte by byte instead of
+        // reinterpret_cast-ing the char buffer to uint32_t: the previous
+        // cast relied on suitable alignment and violated strict
+        // aliasing. The result is identical to ntohl() on the raw bytes.
+        auto b = o.as<std::array<char, 4>>();
+        return (Tid(static_cast<unsigned char>(b[0])) << 24)
+             | (Tid(static_cast<unsigned char>(b[1])) << 16)
+             | (Tid(static_cast<unsigned char>(b[2])) << 8)
+             |  Tid(static_cast<unsigned char>(b[3]));
+    }
+    }
+}
+
+/** A decoded DHT network message and all its optional payload fields. */
+struct ParsedMessage {
+    MessageType type;
+    /* Node ID of the sender */
+    InfoHash id;
+    /* Network id */
+    NetId network {0};
+    /** Is a client node */
+    bool is_client {false};
+    /* hash for which values are requested */
+    InfoHash info_hash;
+    /* target id around which to find nodes */
+    InfoHash target;
+    /* transaction id */
+    Tid tid;
+    /* tid for packets going through request socket */
+    Tid socket_id;
+    /* security token */
+    Blob token;
+    /* the value id (announce confirmation) */
+    Value::Id value_id;
+    /* time when value was first created */
+    time_point created { time_point::max() };
+    /* IPv4/IPv6 nodes in response to a 'find' request */
+    Blob nodes4_raw, nodes6_raw;
+    std::vector<Sp<Node>> nodes4, nodes6;
+    /* values to store or retrieve request */
+    std::vector<Sp<Value>> values;
+    /* ids of values the remote node refreshed or expired */
+    std::vector<Value::Id> refreshed_values {};
+    std::vector<Value::Id> expired_values {};
+    /* index for fields values */
+    std::vector<Sp<FieldValueIndex>> fields;
+    /** When part of the message header: {index -> (total size, {})}
+     * When part of partial value data: {index -> (offset, part_data)} */
+    std::map<unsigned, std::pair<unsigned, Blob>> value_parts;
+    /* query describing a filter to apply on values. */
+    Query query;
+    /* states if ipv4 or ipv6 request */
+    want_t want;
+    /* error code in case of error */
+    uint16_t error_code;
+    /* version string ("v" field) and address reported by the distant node */
+    std::string ua;
+    SockAddr addr;
+    /** Decode a msgpack object into this structure; throws msgpack::type_error on malformed input. */
+    void msgpack_unpack(msgpack::object o);
+
+    /** Merge value-part fragments received in another partial message. */
+    bool append(const ParsedMessage& block);
+    /** True when all announced value parts are fully received (also decodes them into `values`). */
+    bool complete();
+};
+
+bool
+ParsedMessage::append(const ParsedMessage& block)
+{
+    // Merge incoming value-part fragments into our partial values.
+    // Returns true when at least one fragment was consumed.
+    bool appended = false;
+    for (const auto& part : block.value_parts) {
+        auto local = value_parts.find(part.first);
+        // Unknown part index, or this part is already fully received.
+        if (local == value_parts.end()
+            || local->second.second.size() >= local->second.first)
+            continue;
+        // TODO: handle out-of-order packets
+        if (part.second.first != local->second.second.size()) {
+            //std::cout << "skipping out-of-order packet" << std::endl;
+            continue;
+        }
+        auto& data = local->second.second;
+        data.insert(data.end(), part.second.second.begin(), part.second.second.end());
+        appended = true;
+    }
+    return appended;
+}
+
+bool
+ParsedMessage::complete()
+{
+    // Every declared part must have received at least its announced size.
+    for (const auto& part : value_parts)
+        if (part.second.second.size() < part.second.first)
+            return false;
+    // Everything arrived: decode each reassembled blob into a Value.
+    for (const auto& part : value_parts) {
+        msgpack::unpacked msg;
+        msgpack::unpack(msg, (const char*)part.second.second.data(), part.second.second.size());
+        values.emplace_back(std::make_shared<Value>(msg.get()));
+    }
+    return true;
+}
+
+void
+ParsedMessage::msgpack_unpack(msgpack::object msg)
+{
+    // Top-level message sections: "y" = message kind, "r" = reply body,
+    // "u" = value update, "e" = error body, "p" = partial value data.
+    auto y = findMapValue(msg, "y");
+    auto r = findMapValue(msg, "r");
+    auto u = findMapValue(msg, "u");
+    auto e = findMapValue(msg, "e");
+    auto v = findMapValue(msg, "p");
+
+    if (auto t = findMapValue(msg, "t"))
+        tid = unpackTid(*t);
+
+    // "v": sender's version/agent string.
+    if (auto rv = findMapValue(msg, "v"))
+        ua = rv->as<std::string>();
+
+    if (auto netid = findMapValue(msg, "n"))
+        network = netid->as<NetId>();
+
+    if (auto is_client_v = findMapValue(msg, "s"))
+        is_client = is_client_v->as<bool>();
+
+    // "q": request method name (present only in queries).
+    std::string q;
+    if (auto rq = findMapValue(msg, "q")) {
+        if (rq->type != msgpack::type::STR)
+            throw msgpack::type_error();
+        q = rq->as<std::string>();
+    }
+
+    // Derive the message type from which sections are present, falling
+    // back to the "q" method name for queries.
+    if (e)
+        type = MessageType::Error;
+    else if (r)
+        type = MessageType::Reply;
+    else if (v)
+        type = MessageType::ValueData;
+    else if (u)
+        type = MessageType::ValueUpdate;
+    else if (y and y->as<std::string>() != "q")
+        throw msgpack::type_error();
+    else if (q == "ping")
+        type = MessageType::Ping;
+    else if (q == "find")
+        type = MessageType::FindNode;
+    else if (q == "get")
+        type = MessageType::GetValues;
+    else if (q == "listen")
+        type = MessageType::Listen;
+    else if (q == "put")
+        type = MessageType::AnnounceValue;
+    else if (q == "refresh")
+        type = MessageType::Refresh;
+    else
+        throw msgpack::type_error();
+
+    // Partial value data: map of {part index -> {"o": offset, "d": bytes}}.
+    if (type == MessageType::ValueData) {
+        if (v->type != msgpack::type::MAP)
+            throw msgpack::type_error();
+        for (size_t i = 0; i < v->via.map.size; ++i) {
+            auto& vdat = v->via.map.ptr[i];
+            auto o = findMapValue(vdat.val, "o");
+            auto d = findMapValue(vdat.val, "d");
+            if (not o or not d)
+                continue;
+            value_parts.emplace(vdat.key.as<unsigned>(), std::pair<size_t, Blob>(o->as<size_t>(), unpackBlob(*d)));
+        }
+        return;
+    }
+
+    // The payload lives in "a" (query arguments), "r", "u" or "e".
+    auto a = findMapValue(msg, "a");
+    if (!a && !r && !e && !u)
+        throw msgpack::type_error();
+    auto& req = a ? *a : (r ? *r : (u ? *u : *e));
+
+    if (e) {
+        if (e->type != msgpack::type::ARRAY)
+            throw msgpack::type_error();
+        error_code = e->via.array.ptr[0].as<uint16_t>();
+    }
+
+    if (auto t = findMapValue(req, "sid"))
+        socket_id = unpackTid(*t);
+
+    if (auto rid = findMapValue(req, "id"))
+        id = {*rid};
+
+    if (auto rh = findMapValue(req, "h"))
+        info_hash = {*rh};
+
+    if (auto rtarget = findMapValue(req, "target"))
+        target = {*rtarget};
+
+    if (auto rquery = findMapValue(req, "q"))
+        query.msgpack_unpack(*rquery);
+
+    if (auto otoken = findMapValue(req, "token"))
+        token = unpackBlob(*otoken);
+
+    if (auto vid = findMapValue(req, "vid"))
+        value_id = vid->as<Value::Id>();
+
+    if (auto rnodes4 = findMapValue(req, "n4"))
+        nodes4_raw = unpackBlob(*rnodes4);
+
+    if (auto rnodes6 = findMapValue(req, "n6"))
+        nodes6_raw = unpackBlob(*rnodes6);
+
+    // "sa": raw sender address, 4 bytes (IPv4) or 16 bytes (IPv6), no port.
+    if (auto sa = findMapValue(req, "sa")) {
+        if (sa->type != msgpack::type::BIN)
+            throw msgpack::type_error();
+        auto l = sa->via.bin.size;
+        if (l == sizeof(in_addr)) {
+            addr.setFamily(AF_INET);
+            auto& a = addr.getIPv4();
+            a.sin_port = 0;
+            std::copy_n(sa->via.bin.ptr, l, (char*)&a.sin_addr);
+        } else if (l == sizeof(in6_addr)) {
+            addr.setFamily(AF_INET6);
+            auto& a = addr.getIPv6();
+            a.sin6_port = 0;
+            std::copy_n(sa->via.bin.ptr, l, (char*)&a.sin6_addr);
+        }
+    } else
+        addr = {};
+
+    if (auto rcreated = findMapValue(req, "c"))
+        created = from_time_t(rcreated->as<std::time_t>());
+
+    // "values": array mixing fully serialized values with bare integers
+    // announcing the size of a value that will arrive in parts.
+    if (auto rvalues = findMapValue(req, "values")) {
+        if (rvalues->type != msgpack::type::ARRAY)
+            throw msgpack::type_error();
+        for (size_t i = 0; i < rvalues->via.array.size; i++) {
+            auto& packed_v = rvalues->via.array.ptr[i];
+            if (packed_v.type == msgpack::type::POSITIVE_INTEGER) {
+                // Skip oversize values with a small margin for header overhead
+                if (packed_v.via.u64 > MAX_VALUE_SIZE + 32)
+                    continue;
+                value_parts.emplace(i, std::make_pair(packed_v.via.u64, Blob{}));
+            } else {
+                try {
+                    values.emplace_back(std::make_shared<Value>(rvalues->via.array.ptr[i]));
+                } catch (const std::exception& e) {
+                    //DHT_LOG_WARN("Error reading value: %s", e.what());
+                }
+            }
+        }
+    } else if (auto raw_fields = findMapValue(req, "fields")) {
+        // "fields": field-query results, flattened as val_num * |fields| rows.
+        if (auto rfields = findMapValue(*raw_fields, "f")) {
+            auto vfields = rfields->as<std::set<Value::Field>>();
+            if (auto rvalues = findMapValue(*raw_fields, "v")) {
+                if (rvalues->type != msgpack::type::ARRAY)
+                    throw msgpack::type_error();
+                size_t val_num = rvalues->via.array.size / vfields.size();
+                for (size_t i = 0; i < val_num; ++i) {
+                    try {
+                        auto v = std::make_shared<FieldValueIndex>();
+                        v->msgpack_unpack_fields(vfields, *rvalues, i*vfields.size());
+                        fields.emplace_back(std::move(v));
+                    } catch (const std::exception& e) { }
+                }
+            }
+        } else {
+            throw msgpack::type_error();
+        }
+    } else if (auto raw_fields = findMapValue(req, "exp")) {
+        expired_values = raw_fields->as<decltype(expired_values)>();
+    } else if (auto raw_fields = findMapValue(req, "re")) {
+        refreshed_values = raw_fields->as<decltype(refreshed_values)>();
+    }
+
+    // "w": list of address families the sender wants nodes for;
+    // absent means "no preference" (want = -1).
+    if (auto w = findMapValue(req, "w")) {
+        if (w->type != msgpack::type::ARRAY)
+            throw msgpack::type_error();
+        want = 0;
+        for (unsigned i=0; i<w->via.array.size; i++) {
+            auto& val = w->via.array.ptr[i];
+            try {
+                auto w = val.as<sa_family_t>();
+                if (w == AF_INET)
+                    want |= WANT4;
+                else if(w == AF_INET6)
+                    want |= WANT6;
+            } catch (const std::exception& e) {};
+        }
+    } else {
+        want = -1;
+    }
+}
+
+} /* namespace net */
+} /* namespace dht */
--- /dev/null
+/*
+ * Copyright (C) 2016 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "net.h"
+#include "value.h"
+
+namespace dht {
+struct Node;
+namespace net {
+
+class NetworkEngine;
+struct ParsedMessage;
+
+/*!
+ * @class Request
+ * @brief An atomic request destined to a node.
+ * @details
+ * A request contains data used by the NetworkEngine to process a request
+ * destined to a specific node and std::function callbacks to execute when
+ * the request is done.
+ */
+struct Request {
+    friend class dht::net::NetworkEngine;
+
+    Sp<Node> node {};             /* the node to whom the request is destined. */
+    time_point reply_time {time_point::min()}; /* time when we received the response to the request. */
+
+    enum class State
+    {
+        PENDING,
+        CANCELLED,
+        EXPIRED,
+        COMPLETED
+    };
+
+    bool expired() const { return state_ == State::EXPIRED; }
+    bool completed() const { return state_ == State::COMPLETED; }
+    bool cancelled() const { return state_ == State::CANCELLED; }
+    bool pending() const { return state_ == State::PENDING; }
+    bool over() const { return not pending(); }
+    State getState() const { return state_; }
+    /** Single-character state code, e.g. for compact status dumps. */
+    char getStateChar() const {
+        switch (state_) {
+            case State::PENDING:   return 'f';
+            case State::CANCELLED: return 'c';
+            case State::EXPIRED:   return 'e';
+            case State::COMPLETED: return 'a';
+            default:               return '?';
+        }
+    }
+
+    Request(State state = State::PENDING) : state_(state) {}
+    Request(MessageType type, Tid tid,
+        Sp<Node> node,
+        Blob&& msg,
+        std::function<void(const Request&, ParsedMessage&&)> on_done,
+        std::function<void(const Request&, bool)> on_expired,
+        Tid socket = 0) :
+        node(node), tid(tid), type(type), on_done(on_done), on_expired(on_expired), msg(std::move(msg)), socket(socket) { }
+
+    Tid getTid() const { return tid; }
+    MessageType getType() const { return type; }
+
+    Tid getSocket() { return socket; }
+    Tid closeSocket() { auto ret = socket; socket = 0; return ret; }
+
+    /** Transition to EXPIRED; fires on_expired once, then releases
+     *  callbacks and payload. No-op unless pending. */
+    void setExpired() {
+        if (pending()) {
+            state_ = Request::State::EXPIRED;
+            on_expired(*this, true);
+            clear();
+        }
+    }
+    /** Transition to COMPLETED; fires on_done once with the reply, then
+     *  releases callbacks and payload. No-op unless pending. */
+    void setDone(ParsedMessage&& msg) {
+        if (pending()) {
+            state_ = Request::State::COMPLETED;
+            on_done(*this, std::forward<ParsedMessage>(msg));
+            clear();
+        }
+    }
+
+    void cancel() {
+        if (pending()) {
+            state_ = State::CANCELLED;
+            clear();
+        }
+    }
+
+private:
+    static const constexpr size_t MAX_ATTEMPT_COUNT {3};
+
+    bool isExpired(time_point now) const {
+        return pending() and now > last_try + Node::MAX_RESPONSE_TIME and attempt_count >= Request::MAX_ATTEMPT_COUNT;
+    }
+
+    /* Release callbacks and the serialized payload once the request is over. */
+    void clear() {
+        on_done = {};
+        on_expired = {};
+        msg.clear();
+    }
+
+    const Tid tid {0}; /* the request id. */
+    const MessageType type {};
+    State state_ {State::PENDING};
+
+    unsigned attempt_count {0}; /* number of attempts to process the request. */
+    time_point start {time_point::min()}; /* time when the request is created. */
+    time_point last_try {time_point::min()}; /* time of the last attempt to process the request. */
+
+    std::function<void(const Request&, ParsedMessage&&)> on_done {};
+    std::function<void(const Request&, bool)> on_expired {};
+
+    Blob msg {}; /* the serialized message. */
+    Tid socket {0}; /* the socket used for further responses.
+                     * Zero-initialized: the default constructor previously
+                     * left it indeterminate, making getSocket()/closeSocket()
+                     * reads undefined behavior on such requests. */
+};
+
+} /* namespace net */
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "rng.h"
+
+#include <chrono>
+#include <cstring>
+
+namespace dht {
+namespace crypto {
+
+// Seed the software PRNG from the wall clock; hardware entropy
+// (rdseed/rdrand) is mixed in later, per draw — see operator().
+random_device::random_device() :
+    gen(std::chrono::system_clock::now().time_since_epoch().count())
+{}
+
+random_device::result_type
+random_device::operator()()
+{
+    // Draw from the software PRNG, then XOR in hardware entropy when
+    // the CPU provides it (rdseed preferred over rdrand).
+    result_type value = dis(gen);
+    result_type hw;
+    if (hasRdseed() and rdseed(&hw))
+        value ^= hw;
+    else if (hasRdrand() and rdrand(&hw))
+        value ^= hw;
+    return value;
+}
+
+// Execute CPUID for the given (leaf, subleaf) and capture the four
+// result registers. x86-specific inline assembly, kept verbatim.
+random_device::CPUIDinfo::CPUIDinfo(const unsigned int func, const unsigned int subfunc)
+{
+    __asm__ __volatile__ (
+        "cpuid"
+        : "=a"(EAX), "=b"(EBX), "=c"(ECX), "=d"(EDX)
+        : "a"(func), "c"(subfunc)
+    );
+}
+
+bool
+random_device::hasIntelCpu()
+{
+    // CPUID leaf 0 returns the vendor string "GenuineIntel" split
+    // across EBX ("Genu"), EDX ("ineI") and ECX ("ntel").
+    CPUIDinfo info(0, 0);
+    if (memcmp((char *) (&info.EBX), "Genu", 4) != 0)
+        return false;
+    if (memcmp((char *) (&info.EDX), "ineI", 4) != 0)
+        return false;
+    return memcmp((char *) (&info.ECX), "ntel", 4) == 0;
+}
+
+bool
+random_device::_hasRdrand()
+{
+    // RDRAND support flag: CPUID leaf 1, ECX bit 30 (Intel CPUs only here).
+    if (not hasIntelCpu())
+        return false;
+    CPUIDinfo info {1, 0};
+    return (info.ECX & (1u << 30)) != 0;
+}
+
+bool
+random_device::_hasRdseed()
+{
+    // RDSEED support flag is CPUID.(EAX=7,ECX=0):EBX bit 18 per the
+    // Intel SDM. The previous code tested ECX of leaf 7, which holds
+    // unrelated feature bits, so RDSEED was never correctly detected.
+    return hasIntelCpu() && (CPUIDinfo {7, 0}.EBX & (1 << 18));
+}
+
+// One RDRAND attempt. The carry flag (captured via setc) reports
+// whether a random value was actually delivered. Asm kept verbatim.
+bool
+random_device::rdrandStep(result_type* r)
+{
+    unsigned char ok;
+    asm volatile ("rdrand %0; setc %1"
+        : "=r" (*r), "=qm" (ok));
+    return ok;
+}
+
+bool
+random_device::rdrand(result_type* r)
+{
+    // RDRAND can transiently fail when the DRNG is out of entropy,
+    // so retry a bounded number of times.
+    for (unsigned attempt = 8; attempt; --attempt) {
+        result_type res;
+        if (rdrandStep(&res)) {
+            *r = res;
+            return true;
+        }
+    }
+    return false;
+}
+
+// One RDSEED attempt. The carry flag (captured via setc) reports
+// whether a seed value was actually delivered. Asm kept verbatim.
+bool
+random_device::rdseedStep(result_type* r)
+{
+    unsigned char ok;
+    asm volatile ("rdseed %0; setc %1"
+        : "=r" (*r), "=qm" (ok));
+    return ok;
+}
+
+bool
+random_device::rdseed(result_type* r)
+{
+    // RDSEED fails more often than RDRAND (it taps the entropy source
+    // directly), hence the larger retry budget.
+    for (unsigned attempt = 256; attempt; --attempt) {
+        result_type res;
+        if (rdseedStep(&res)) {
+            *r = res;
+            return true;
+        }
+    }
+    return false;
+}
+
+}}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "routing_table.h"
+
+#include "network_engine.h"
+#include "rng.h"
+
+#include <memory>
+
+namespace dht {
+
+// File-local PRNG, seeded from the hardware-backed random_device.
+static std::mt19937 rd{ dht::crypto::random_device{}() };
+#ifdef _WIN32
+// On Windows a wider distribution is used, clamped to byte range —
+// presumably because MSVC's <random> rejects uniform_int_distribution
+// over 8-bit types (not a standard IntType); TODO confirm.
+static std::uniform_int_distribution<int> rand_byte{ 0, std::numeric_limits<uint8_t>::max() };
+#else
+static std::uniform_int_distribution<uint8_t> rand_byte;
+#endif
+
+Sp<Node>
+Bucket::randomNode()
+{
+    // Pick a node uniformly at random, preferring non-expired nodes:
+    // expired nodes are only eligible when every node has expired.
+    if (nodes.empty())
+        return nullptr;
+    unsigned expired_node_count = std::count_if(nodes.cbegin(), nodes.cend(), [](const decltype(nodes)::value_type& node) {
+        return node->isExpired();
+    });
+    auto prioritize_not_expired = expired_node_count < nodes.size();
+
+    // Draw an index within the eligible subset.
+    std::uniform_int_distribution<unsigned> rand_node(0, prioritize_not_expired
+        ? nodes.size() - expired_node_count - 1
+        : nodes.size()-1);
+    unsigned nn = rand_node(rd);
+    // Walk the list, counting only eligible nodes, until we reach index nn.
+    for (auto& n : nodes) {
+        if (not (prioritize_not_expired and n->isExpired())) {
+            if (not nn--)
+                return n;
+        }
+    }
+    // Should be unreachable given the counts above; defensive fallback.
+    return nodes.back();
+}
+
+void Bucket::sendCachedPing(net::NetworkEngine& ne)
+{
+    // Ping the cached replacement-candidate node, if any, then drop it.
+    if (cached) {
+        //DHT_LOG.d(b.cached->id, "[node %s] sending ping to cached node", cached->toString().c_str());
+        ne.sendPing(cached, nullptr, nullptr);
+        cached = {};
+    }
+}
+
+InfoHash
+RoutingTable::randomId(const RoutingTable::const_iterator& it) const
+{
+    // Build a random id that falls inside this bucket: keep the bucket's
+    // prefix bits and randomize everything after them.
+    int bit1 = it->first.lowbit();
+    int bit2 = std::next(it) != end() ? std::next(it)->first.lowbit() : -1;
+    int bit = std::max(bit1, bit2) + 1; // bucket depth; same computation as depth()
+
+    if (bit >= 8*(int)HASH_LEN)
+        return it->first;
+
+    int b = bit/8; // index of the first byte containing random bits
+    InfoHash id_return;
+    std::copy_n(it->first.cbegin(), b, id_return.begin());
+    // Keep the top (bit % 8) bits of the boundary byte, randomize the rest.
+    id_return[b] = it->first[b] & (0xFF00 >> (bit % 8));
+    id_return[b] |= rand_byte(rd) >> (bit % 8);
+    for (unsigned i = b + 1; i < HASH_LEN; i++)
+        id_return[i] = rand_byte(rd);
+    return id_return;
+}
+
+InfoHash
+RoutingTable::middle(const RoutingTable::const_iterator& it) const
+{
+    // The split point of a bucket: its first id with the bit right
+    // after the common prefix set to 1.
+    unsigned bit = depth(it);
+    if (bit >= 8*HASH_LEN)
+        throw std::out_of_range("End of table");
+    auto id = it->first;
+    id.setBit(bit, 1);
+    return id;
+}
+
+unsigned
+RoutingTable::depth(const RoutingTable::const_iterator& it) const
+{
+    if (it == end())
+        return 0;
+    // Depth = one past the lowest bit distinguishing this bucket's
+    // first id from the next bucket's (lowbit() yields -1 for a zero id).
+    int low = it->first.lowbit();
+    auto next = std::next(it);
+    if (next != end())
+        low = std::max(low, next->first.lowbit());
+    return low + 1;
+}
+
+std::vector<Sp<Node>>
+RoutingTable::findClosestNodes(const InfoHash id, time_point now, size_t count) const
+{
+    // Collect up to `count` good nodes sorted by XOR distance to `id`,
+    // starting from the target bucket and expanding outward.
+    std::vector<Sp<Node>> nodes;
+    nodes.reserve(count);
+    auto bucket = findBucket(id);
+
+    if (bucket == end()) { return nodes; }
+
+    // Insert each good node of a bucket into `nodes`, keeping the vector
+    // sorted by increasing XOR distance to the target id.
+    auto sortedBucketInsert = [&](const Bucket &b) {
+        for (auto n : b.nodes) {
+            if (not n->isGood(now))
+                continue;
+
+            auto here = std::find_if(nodes.begin(), nodes.end(),
+                [&id,&n](Sp<Node> &node) {
+                    return id.xorCmp(n->id, node->id) < 0;
+                }
+            );
+            nodes.insert(here, n);
+        }
+    };
+
+    // Alternate between the next buckets (itn) and the previous ones
+    // (itp) until enough nodes are gathered or the table is exhausted.
+    auto itn = bucket;
+    auto itp = (bucket == begin()) ? end() : std::prev(bucket);
+    while (nodes.size() < count && (itn != end() || itp != end())) {
+        if (itn != end()) {
+            sortedBucketInsert(*itn);
+            itn = std::next(itn);
+        }
+        if (itp != end()) {
+            sortedBucketInsert(*itp);
+            itp = (itp == begin()) ? end() : std::prev(itp);
+        }
+    }
+
+    // shrink to the count closest nodes.
+    if (nodes.size() > count) {
+        nodes.resize(count);
+    }
+    return nodes;
+}
+
+RoutingTable::iterator
+RoutingTable::findBucket(const InfoHash& id)
+{
+    // Buckets are ordered by first id; return the last bucket whose
+    // first id does not exceed the target.
+    if (empty())
+        return end();
+    for (auto b = begin();;) {
+        auto next = std::next(b);
+        if (next == end() or InfoHash::cmp(id, next->first) < 0)
+            return b;
+        b = next;
+    }
+}
+
+RoutingTable::const_iterator
+RoutingTable::findBucket(const InfoHash& id) const
+{
+    // Delegate to the non-const overload to avoid duplicating the search.
+    return const_cast<RoutingTable*>(this)->findBucket(id);
+}
+
+/* Split a bucket into two equal parts. */
+bool
+RoutingTable::split(const RoutingTable::iterator& b)
+{
+    InfoHash new_id;
+    try {
+        new_id = middle(b);
+    } catch (const std::out_of_range& e) {
+        return false;
+    }
+
+    // Insert the new (upper-half) bucket right after the one being split.
+    insert(std::next(b), Bucket {b->af, new_id, b->time});
+
+    // Re-distribute every node of the old bucket into whichever of the
+    // two halves now covers its id.
+    std::list<Sp<Node>> pending {};
+    pending.splice(pending.begin(), b->nodes);
+    while (not pending.empty()) {
+        auto n = pending.begin();
+        auto target = findBucket((*n)->id);
+        if (target == end())
+            pending.erase(n);
+        else
+            target->nodes.splice(target->nodes.begin(), pending, n);
+    }
+    return true;
+}
+
+bool
+RoutingTable::onNewNode(const Sp<Node>& node, int confirm, const time_point& now, const InfoHash& myid, net::NetworkEngine& ne) {
+    // Try to place a node in the table. Returns false if the node is
+    // already present or no bucket covers its id; true otherwise (even
+    // when the node is only cached as a replacement candidate).
+    auto b = findBucket(node->id);
+    if (b == end()) return false;
+
+    // A confirm level of 2 refreshes the bucket's last-activity time.
+    if (confirm == 2)
+        b->time = now;
+
+    for (auto& n : b->nodes) {
+        if (n == node)
+            return false;
+    }
+
+    bool mybucket = contains(b, myid);
+    if (mybucket) {
+        grow_time = now;
+        //scheduler.edit(nextNodesConfirmation, now);
+    }
+
+    if (b->nodes.size() >= TARGET_NODES) {
+        /* Try to get rid of an expired node. */
+        for (auto& n : b->nodes)
+            if (n->isExpired()) {
+                n = node;
+                return true;
+            }
+        /* Bucket full. Ping a dubious node */
+        bool dubious = false;
+        for (auto& n : b->nodes) {
+            /* Pick the first dubious node that we haven't pinged in the
+               last 9 seconds. This gives nodes the time to reply, but
+               tends to concentrate on the same nodes, so that we get rid
+               of bad nodes fast. */
+            if (not n->isGood(now)) {
+                dubious = true;
+                if (not n->isPendingMessage()) {
+                    //DHT_LOG.d(n->id, "[node %s] sending ping to dubious node", n->toString().c_str());
+                    ne.sendPing(n, nullptr, nullptr);
+                    break;
+                }
+            }
+        }
+
+        // Split our own bucket (or shallow buckets in client mode) when
+        // full of good nodes, then retry the insertion recursively.
+        if ((mybucket || (is_client and depth(b) < 6)) && (!dubious || size() == 1)) {
+            //DHT_LOG.d("Splitting from depth %u", depth(b));
+            b->sendCachedPing(ne);
+            split(b);
+            return onNewNode(node, confirm, now, myid, ne);
+        }
+
+        /* No space for this node. Cache it away for later. */
+        if (confirm or not b->cached)
+            b->cached = node;
+    } else {
+        /* Create a new node. */
+        b->nodes.emplace_front(node);
+    }
+    return true;
+}
+
+
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2018 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "value.h"
+#include "request.h"
+#include "listener.h"
+#include "value_cache.h"
+#include "op_cache.h"
+
+namespace dht {
+
+/**
+ * A single "get" operation data
+ */
+struct Dht::Get {
+    time_point start;     // when the operation was started
+    Value::Filter filter; // local filter applied to received values
+    Sp<Query> query;      // query sent to remote nodes
+    QueryCallback query_cb;
+    GetCallback get_cb;   // invoked with values as they arrive
+    DoneCallback done_cb; // invoked once when the operation completes
+};
+
+/**
+ * A single "put" operation data
+ */
+struct Dht::Announce {
+    bool permanent;     // presumably keeps the value re-announced until cancelled — confirm in Dht
+    Sp<Value> value;    // the value being announced
+    time_point created; // creation time reported for the value
+    DoneCallback callback;
+};
+
+struct Dht::SearchNode {
+ /**
+ * Foreach value id, we keep track of a pair (net::Request, time_point) where the
+ * request is the request returned by the network engine and the time_point
+ * is the next time at which the value must be refreshed.
+ */
+ using AnnounceStatus = std::map<Value::Id, std::pair<Sp<net::Request>, time_point>>;
+ /**
+ * Foreach Query, we keep track of the request returned by the network
+ * engine when we sent the "get".
+ */
+ using SyncStatus = std::map<Sp<Query>, Sp<net::Request>>;
+
+ struct CachedListenStatus {
+ ValueCache cache;
+ Sp<Scheduler::Job> cacheExpirationJob {};
+ Sp<net::Request> req {};
+ CachedListenStatus(ValueStateCallback&& cb) : cache(std::forward<ValueStateCallback>(cb)) {}
+ CachedListenStatus(CachedListenStatus&&) = default;
+ CachedListenStatus(const CachedListenStatus&) = delete;
+ CachedListenStatus& operator=(const CachedListenStatus&) = delete;
+ };
+ using NodeListenerStatus = std::map<Sp<Query>, CachedListenStatus>;
+
+ Sp<Node> node {}; /* the node info */
+
+ /* queries sent for finding out values hosted by the node */
+ Sp<Query> probe_query {};
+ /* queries substituting formal 'get' requests */
+ std::map<Sp<Query>, std::vector<Sp<Query>>> pagination_queries {};
+
+ SyncStatus getStatus {}; /* get/sync status */
+ NodeListenerStatus listenStatus {}; /* listen status */
+ AnnounceStatus acked {}; /* announcement status for a given value id */
+
+ Blob token {}; /* last token the node sent to us after a get request */
+ time_point last_get_reply {time_point::min()}; /* last time received valid token */
+ bool candidate {false}; /* A search node is candidate if the search is/was synced and this
+ node is a new candidate for inclusion. */
+ Sp<Scheduler::Job> syncJob {};
+
+ SearchNode() : node() {}
+ SearchNode(const SearchNode&) = delete;
+ SearchNode(SearchNode&&) = default;
+ SearchNode& operator=(const SearchNode&) = delete;
+ SearchNode& operator=(SearchNode&&) = default;
+
+ SearchNode(const Sp<Node>& node) : node(node) {}
+ ~SearchNode() {
+ if (node) {
+ cancelGet();
+ cancelListen();
+ cancelAnnounce();
+ }
+ }
+
+ /**
+ * Can we use this node to listen/announce now ?
+ */
+ bool isSynced(const time_point& now) const {
+ return not node->isExpired() and
+ not token.empty() and last_get_reply >= now - Node::NODE_EXPIRE_TIME;
+ }
+
+ time_point getSyncTime(const time_point& now) const {
+ if (node->isExpired() or token.empty())
+ return now;
+ return last_get_reply + Node::NODE_EXPIRE_TIME;
+ }
+
+ /**
+ * Could a particular "get" request be sent to this node now ?
+ *
+ * A 'get' request can be sent when all of the following requirements are
+ * met:
+ *
+ * - The node is not expired;
+ * - The pagination process for this particular 'get' must not have begun;
+ * - There hasn't been any response for a request, satisfying the initial
+ * request, anytime following the initial request.
+ * - No other request satisfying the request must be pending;
+ *
+ * @param now The time reference to now.
+ * @param update The time of the last 'get' op satisfying this request.
+ * @param q The query defining the "get" operation we're referring to.
+ *
+ * @return true if we can send get, else false.
+ */
+ bool canGet(time_point now, time_point update, const Sp<Query>& q) const {
+ if (node->isExpired())
+ return false;
+
+ bool pending {false},
+ completed_sq_status {false},
+ pending_sq_status {false};
+ for (const auto& s : getStatus) {
+ if (s.second and s.second->pending())
+ pending = true;
+ if (s.first and q and q->isSatisfiedBy(*s.first) and s.second) {
+ if (s.second->pending())
+ pending_sq_status = true;
+ else if (s.second->completed() and not (update > s.second->reply_time))
+ completed_sq_status = true;
+ if (completed_sq_status and pending_sq_status)
+ break;
+ }
+ }
+
+ return (not pending and now > last_get_reply + Node::NODE_EXPIRE_TIME) or
+ not (completed_sq_status or pending_sq_status or hasStartedPagination(q));
+ }
+
+ /**
+ * Tells if we have started sending a 'get' request in paginated form.
+ *
+ * @param q The query as an id for a given 'get' request.
+ *
+ * @return true if pagination process has started, else false.
+ */
+ bool hasStartedPagination(const Sp<Query>& q) const {
+ const auto& pqs = pagination_queries.find(q);
+ if (pqs == pagination_queries.cend() or pqs->second.empty())
+ return false;
+ return std::find_if(pqs->second.cbegin(), pqs->second.cend(),
+ [this](const Sp<Query>& query) {
+ const auto& req = getStatus.find(query);
+ return req != getStatus.cend() and req->second;
+ }) != pqs->second.cend();
+ };
+
+
+ /**
+ * Tell if the node has finished responding to a given 'get' request.
+ *
+ * A 'get' request can be divided in multiple requests called "pagination
+ * requests". If this is the case, we have to check if they're all finished.
+ * Otherwise, we only check for the single request.
+ *
+ * @param get The 'get' request data structure;
+ *
+ * @return true if it has finished, else false.
+ */
+ bool isDone(const Get& get) const {
+ if (hasStartedPagination(get.query)) {
+ const auto& pqs = pagination_queries.find(get.query);
+ auto paginationPending = std::find_if(pqs->second.cbegin(), pqs->second.cend(),
+ [this](const Sp<Query>& query) {
+ const auto& req = getStatus.find(query);
+ return req != getStatus.cend() and req->second and req->second->pending();
+ }) != pqs->second.cend();
+ return not paginationPending;
+ } else { /* no pagination yet */
+ const auto& gs = get.query ? getStatus.find(get.query) : getStatus.cend();
+ return gs != getStatus.end() and gs->second and not gs->second->pending();
+ }
+ }
+
+ void cancelGet() {
+ for (const auto& status : getStatus) {
+ if (status.second->pending()) {
+ node->cancelRequest(status.second);
+ }
+ }
+ getStatus.clear();
+ }
+
    /**
     * Feed values received for query 'q' into that query's listen cache,
     * then reschedule the cache-expiration job to the next expiration time
     * returned by the cache. No-op if we hold no listen state for 'q'.
     */
    void onValues(const Sp<Query>& q, net::RequestAnswer&& answer, const TypeStore& types, Scheduler& scheduler)
    {
        auto l = listenStatus.find(q);
        if (l != listenStatus.end()) {
            auto next = l->second.cache.onValues(answer.values,
                                                 answer.refreshed_values,
                                                 answer.expired_values, types, scheduler.time());
            scheduler.edit(l->second.cacheExpirationJob, next);
        }
    }
+
    /**
     * Expire cached values for the listen on query 'q' and reschedule the
     * cache-expiration job to the next expiration time. No-op if we hold
     * no listen state for 'q'.
     */
    void expireValues(const Sp<Query>& q, Scheduler& scheduler) {
        auto l = listenStatus.find(q);
        if (l != listenStatus.end()) {
            auto next = l->second.cache.expireValues(scheduler.time());
            scheduler.edit(l->second.cacheExpirationJob, next);
        }
    }
+
+ /**
+ * Tells if a request in the status map is expired.
+ *
+ * @param status A SyncStatus reference.
+ *
+ * @return true if there exists an expired request, else false.
+ */
+ /*static bool expired(const SyncStatus& status) const {
+ return std::find_if(status.begin(), status.end(),
+ [](const SyncStatus::value_type& r){
+ return r.second and r.second->expired();
+ }) != status.end();
+ }*/
+
+ /**
+ * Tells if a request in the status map is pending.
+ *
+ * @param status A SyncStatus reference.
+ *
+ * @return true if there exists an expired request, else false.
+ */
+ static bool pending(const SyncStatus& status) {
+ return std::find_if(status.begin(), status.end(),
+ [](const SyncStatus::value_type& r){
+ return r.second and r.second->pending();
+ }) != status.end();
+ }
+ static bool pending(const NodeListenerStatus& status) {
+ return std::find_if(status.begin(), status.end(),
+ [](const NodeListenerStatus::value_type& r){
+ return r.second.req and r.second.req->pending();
+ }) != status.end();
+ }
+
    /** True if any 'get' request to this node is still pending. */
    bool pendingGet() const { return pending(getStatus); }
+
+ bool isAnnounced(Value::Id vid) const {
+ auto ack = acked.find(vid);
+ if (ack == acked.end() or not ack->second.first)
+ return false;
+ return ack->second.first->completed();
+ }
+ void cancelAnnounce() {
+ for (const auto& status : acked) {
+ const auto& req = status.second.first;
+ if (req and req->pending()) {
+ node->cancelRequest(req);
+ }
+ }
+ acked.clear();
+ }
+
+ bool isListening(time_point now) const {
+ auto ls = listenStatus.begin();
+ for ( ; ls != listenStatus.end() ; ++ls) {
+ if (isListening(now, ls)) {
+ break;
+ }
+ }
+ return ls != listenStatus.end();
+ }
+ bool isListening(time_point now, const Sp<Query>& q) const {
+ const auto& ls = listenStatus.find(q);
+ if (ls == listenStatus.end())
+ return false;
+ else
+ return isListening(now, ls);
+ }
+ bool isListening(time_point now, NodeListenerStatus::const_iterator listen_status) const {
+ if (listen_status == listenStatus.end() or not listen_status->second.req)
+ return false;
+ return listen_status->second.req->reply_time + LISTEN_EXPIRE_TIME > now;
+ }
+ void cancelListen() {
+ for (const auto& status : listenStatus)
+ node->cancelRequest(status.second.req);
+ listenStatus.clear();
+ }
+ void cancelListen(const Sp<Query>& query) {
+ auto it = listenStatus.find(query);
+ if (it != listenStatus.end()) {
+ node->cancelRequest(it->second.req);
+ listenStatus.erase(it);
+ }
+ }
+
+ /**
+ * Assuming the node is synced, should a "put" request be sent to this node now ?
+ */
+ time_point getAnnounceTime(Value::Id vid) const {
+ const auto& ack = acked.find(vid);
+ if (ack == acked.cend() or not ack->second.first) {
+ return time_point::min();
+ }
+ if (ack->second.first->completed()) {
+ return ack->second.second - REANNOUNCE_MARGIN;
+ }
+ return ack->second.first->pending() ? time_point::max() : time_point::min();
+ }
+
+ /**
+ * Assuming the node is synced, should the "listen" request with Query q be
+ * sent to this node now ?
+ */
+ time_point getListenTime(const Sp<Query>& q) const {
+ auto listen_status = listenStatus.find(q);
+ if (listen_status == listenStatus.end() or not listen_status->second.req)
+ return time_point::min();
+ return listen_status->second.req->pending() ? time_point::max() :
+ listen_status->second.req->reply_time + LISTEN_EXPIRE_TIME - REANNOUNCE_MARGIN;
+ }
+
+ /**
+ * Is this node expired or candidate
+ */
+ bool isBad() const {
+ return not node or node->isExpired() or candidate;
+ }
+};
+
+/**
+ * A search is a list of the nodes we think are responsible
+ * for storing values for a given hash.
+ */
+struct Dht::Search {
+ InfoHash id {};
+ sa_family_t af;
+
+ uint16_t tid;
+ time_point refill_time {time_point::min()};
+ time_point step_time {time_point::min()}; /* the time of the last search step */
+ Sp<Scheduler::Job> nextSearchStep {};
+
+ bool expired {false}; /* no node, or all nodes expired */
+ bool done {false}; /* search is over, cached for later */
+ std::vector<SearchNode> nodes {};
+
+ /* pending puts */
+ std::vector<Announce> announce {};
+
+ /* pending gets */
+ std::multimap<time_point, Get> callbacks {};
+
+ /* listeners */
+ std::map<size_t, SearchListener> listeners {};
+ size_t listener_token = 1;
+
+ /* Cache */
+ SearchCache cache;
+ Sp<Scheduler::Job> opExpirationJob;
+
+ ~Search() {
+ if (opExpirationJob)
+ opExpirationJob->cancel();
+ for (auto& get : callbacks) {
+ get.second.done_cb(false, {});
+ get.second.done_cb = {};
+ }
+ for (auto& put : announce) {
+ put.callback(false, {});
+ put.callback = {};
+ }
+ }
+
+ /**
+ * @returns true if the node was not present and added to the search
+ */
+ bool insertNode(const Sp<Node>& n, time_point now, const Blob& token={});
+
+ SearchNode* getNode(const Sp<Node>& n) {
+ auto srn = std::find_if(nodes.begin(), nodes.end(), [&](SearchNode& sn) {
+ return n == sn.node;
+ });
+ return (srn == nodes.end()) ? nullptr : &(*srn);
+ }
+
+ /* number of concurrent sync requests */
+ unsigned currentlySolicitedNodeCount() const {
+ unsigned count = 0;
+ for (const auto& n : nodes)
+ if (not n.isBad() and n.pendingGet())
+ count++;
+ return count;
+ }
+
+ /**
+ * Can we use this search to announce ?
+ */
+ bool isSynced(time_point now) const;
+
+ /**
+ * Get the time of the last "get" operation performed on this search,
+ * or time_point::min() if no such operation have been performed.
+ *
+ * @param query The query identifying a 'get' request.
+ */
+ time_point getLastGetTime(const Query&) const;
+ time_point getLastGetTime() const;
+
+ /**
+ * Is this get operation done ?
+ */
+ bool isDone(const Get& get) const;
+
+ /**
+ * Sets a consistent state of the search after a given 'get' operation as
+ * been completed.
+ *
+ * This will also make sure to call the associated 'done callback'.
+ *
+ * @param get The 'get' operation which is now over.
+ */
+ void setDone(const Get& get) {
+ for (auto& n : nodes) {
+ auto pqs = n.pagination_queries.find(get.query);
+ if (pqs != n.pagination_queries.cend()) {
+ for (auto& pq : pqs->second)
+ n.getStatus.erase(pq);
+ }
+ n.getStatus.erase(get.query);
+ }
+ if (get.done_cb)
+ get.done_cb(true, getNodes());
+ }
+
+ /**
+ * Set the search in a consistent state after the search is done. This is
+ * the opportunity we have to clear some entries in the SearchNodes status
+ * maps.
+ */
+ void setDone() {
+ for (auto& n : nodes) {
+ n.getStatus.clear();
+ n.listenStatus.clear();
+ n.acked.clear();
+ }
+ done = true;
+ }
+
+ bool isAnnounced(Value::Id id) const;
+ bool isListening(time_point now) const;
+
+ void get(Value::Filter f, const Sp<Query>& q, const QueryCallback& qcb, const GetCallback& gcb, const DoneCallback& dcb, Scheduler& scheduler) {
+ if (gcb or qcb) {
+ const auto& now = scheduler.time();
+ callbacks.emplace(now, Get { now, f, q, qcb, gcb, dcb });
+ auto values = cache.get(f);
+ if (not values.empty())
+ gcb(values);
+ scheduler.edit(nextSearchStep, now);
+ }
+ }
+
+ size_t listen(ValueCallback cb, Value::Filter f, const Sp<Query>& q, Scheduler& scheduler) {
+ //DHT_LOG.e(id, "[search %s IPv%c] listen", id.toString().c_str(), (af == AF_INET) ? '4' : '6');
+ return cache.listen(cb, q, f, [&](const Sp<Query>& q, ValueCallback vcb){
+ done = false;
+ auto token = ++listener_token;
+ listeners.emplace(token, SearchListener{q, f, vcb});
+ scheduler.edit(nextSearchStep, scheduler.time());
+ return token;
+ });
+ }
+
+ void cancelListen(size_t token, Scheduler& scheduler) {
+ cache.cancelListen(token, scheduler.time());
+ if (not opExpirationJob)
+ opExpirationJob = scheduler.add(time_point::max(), [this,&scheduler]{
+ auto nextExpire = cache.expire(scheduler.time(), [&](size_t t){
+ Sp<Query> query;
+ const auto& ll = listeners.find(t);
+ if (ll != listeners.cend()) {
+ query = ll->second.query;
+ listeners.erase(ll);
+ }
+ for (auto& sn : nodes) {
+ if (listeners.empty())
+ sn.cancelListen();
+ else if (query)
+ sn.cancelListen(query);
+ }
+ });
+ scheduler.edit(opExpirationJob, nextExpire);
+ });
+ scheduler.edit(opExpirationJob, cache.getExpiration());
+ }
+
+ /**
+ * @return The number of non-good search nodes.
+ */
+ unsigned getNumberOfBadNodes() const {
+ return std::count_if(nodes.begin(), nodes.end(), [](const SearchNode& sn) {
+ return sn.isBad();
+ });
+ }
+ unsigned getNumberOfConsecutiveBadNodes() const {
+ unsigned count = 0;
+ std::find_if(nodes.begin(), nodes.end(), [&count](const SearchNode& sn) {
+ if (not sn.isBad())
+ return true;
+ ++count;
+ return false;
+ });
+ return count;
+ }
+
+ /**
+ * Removes a node which have been expired for at least
+ * NODE::NODE_EXPIRE_TIME minutes. The search for an expired node starts
+ * from the end.
+ *
+ * @param now The reference to now.
+ *
+ * @return true if a node has been removed, else false.
+ */
+ bool removeExpiredNode(const time_point& now) {
+ for (auto e = nodes.cend(); e != nodes.cbegin();) {
+ const Node& n = *(--e)->node;
+ if (n.isRemovable(now)) {
+ //std::cout << "Removing expired node " << n.id << " from IPv" << (af==AF_INET?'4':'6') << " search " << id << std::endl;
+ nodes.erase(e);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * This method is called when we have discovered that the search is expired.
+ * We have to
+ *
+ * - remove all nodes from the search;
+ * - clear (non-permanent) callbacks;
+ */
+ void expire() {
+ // no nodes or all expired nodes. This is most likely a connectivity change event.
+ expired = true;
+
+ nodes.clear();
+ if (announce.empty() && listeners.empty())
+ // Listening or announcing requires keeping the cluster up to date.
+ setDone();
+ {
+ auto get_cbs = std::move(callbacks);
+ for (const auto& g : get_cbs) {
+ if (g.second.done_cb)
+ g.second.done_cb(false, {});
+ }
+ }
+ {
+ std::vector<DoneCallback> a_cbs;
+ a_cbs.reserve(announce.size());
+ for (auto ait = announce.begin() ; ait != announce.end(); ) {
+ if (ait->callback)
+ a_cbs.emplace_back(std::move(ait->callback));
+ if (not ait->permanent)
+ ait = announce.erase(ait);
+ else
+ ait++;
+ }
+ for (const auto& a : a_cbs)
+ a(false, {});
+ }
+ }
+
+ /**
+ * If the value was just successfully announced, call the callback and erase it if not permanent.
+ *
+ * @param vid The id of the announced value.
+ * @param types The sequence of existing types.
+ * @param now The time reference to now.
+ */
+ void checkAnnounced(Value::Id vid = Value::INVALID_ID) {
+ auto announced = std::partition(announce.begin(), announce.end(),
+ [this,&vid](Announce& a) {
+ if (vid != Value::INVALID_ID and (!a.value || a.value->id != vid))
+ return true;
+ if (isAnnounced(a.value->id)) {
+ if (a.callback) {
+ a.callback(true, getNodes());
+ a.callback = nullptr;
+ }
+ if (not a.permanent)
+ return false;
+ }
+ return true;
+ });
+ // remove acked for cleared annouces
+ for (auto it = announced; it != announce.end(); ++it) {
+ for (auto& n : nodes)
+ n.acked.erase(it->value->id);
+ }
+ announce.erase(announced, announce.end());
+ }
+
+ std::vector<Sp<Node>> getNodes() const;
+
+ void clear() {
+ announce.clear();
+ callbacks.clear();
+ listeners.clear();
+ nodes.clear();
+ nextSearchStep.reset();
+ }
+};
+
+
/* A search contains a list of nodes, sorted by decreasing distance to the
   target. We just got a new candidate, insert it at the right spot or
   discard it. */
bool
Dht::Search::insertNode(const Sp<Node>& snode, time_point now, const Blob& token)
{
    auto& node = *snode;
    const auto& nid = node.id;

    // A search is per address family; reject nodes from the other one.
    if (node.getFamily() != af)
        return false;

    // Scan backwards (farthest first) for the node, or for the insertion
    // point that keeps the list sorted by XOR distance to the search id.
    bool found = false;
    auto n = nodes.end();
    while (n != nodes.begin()) {
        --n;
        if (n->node == snode) {
            found = true;
            break;
        }

        /* Node not found. We could insert it after this one. */
        if (id.xorCmp(nid, n->node->id) > 0) {
            ++n;
            break;
        }
    }

    bool new_search_node = false;
    if (not found) {
        // find if and where to trim excessive nodes
        auto t = nodes.cend();
        size_t bad = 0;     // number of bad nodes (if search is not expired)
        bool full {false};  // is the search full (has the maximum nodes)
        if (expired) {
            // if the search is expired, trim to SEARCH_NODES nodes
            if (nodes.size() >= SEARCH_NODES) {
                full = true;
                t = nodes.begin() + SEARCH_NODES;
            }
        } else {
            // otherwise, trim to SEARCH_NODES nodes, not counting bad nodes
            bad = getNumberOfBadNodes();
            full = nodes.size() - bad >= SEARCH_NODES;
            while (std::distance(nodes.cbegin(), t) - bad > SEARCH_NODES) {
                --t;
                if (t->isBad())
                    bad--;
            }
        }

        if (full) {
            if (t != nodes.cend())
                nodes.resize(std::distance(nodes.cbegin(), t));
            // Insertion point falls beyond the trimmed list: candidate is
            // farther than everything we keep, so discard it.
            if (n >= t)
                return false;
        }

        // Reset search timer if the search is empty
        if (nodes.empty()) {
            step_time = time_point::min();
        }
        n = nodes.insert(n, SearchNode(snode));
        node.setTime(now);
        new_search_node = true;
        if (node.isExpired()) {
            if (not expired)
                bad++;
        } else if (expired) {
            // First live node revives an expired search: everything else
            // currently in the list counts as bad.
            bad = nodes.size() - 1;
            expired = false;
        }

        // Trim from the tail until the good-node count fits SEARCH_NODES.
        while (nodes.size() - bad > SEARCH_NODES) {
            if (not expired and nodes.back().isBad())
                bad--;
            nodes.pop_back();
        }
    }
    if (not token.empty()) {
        n->candidate = false;
        n->last_get_reply = now;
        // Tokens longer than 64 bytes are not stored (protocol sanity limit).
        if (token.size() <= 64)
            n->token = token;
        expired = false;
    }
    if (new_search_node)
        removeExpiredNode(now);
    return new_search_node;
}
+
+std::vector<Sp<Node>>
+Dht::Search::getNodes() const
+{
+ std::vector<Sp<Node>> ret {};
+ ret.reserve(nodes.size());
+ for (const auto& sn : nodes)
+ ret.emplace_back(sn.node);
+ return ret;
+}
+
+bool
+Dht::Search::isSynced(time_point now) const
+{
+ unsigned i = 0;
+ for (const auto& n : nodes) {
+ if (n.isBad())
+ continue;
+ if (not n.isSynced(now))
+ return false;
+ if (++i == TARGET_NODES)
+ break;
+ }
+ return i > 0;
+}
+
+time_point
+Dht::Search::getLastGetTime(const Query& q) const
+{
+ time_point last = time_point::min();
+ for (const auto& g : callbacks)
+ last = std::max(last, (q.isSatisfiedBy(*g.second.query) ? g.second.start : time_point::min()));
+ return last;
+}
+
+time_point
+Dht::Search::getLastGetTime() const
+{
+ time_point last = time_point::min();
+ for (const auto& g : callbacks)
+ last = std::max(last, g.second.start);
+ return last;
+}
+
+bool
+Dht::Search::isDone(const Get& get) const
+{
+ unsigned i = 0;
+ for (const auto& sn : nodes) {
+ if (sn.isBad())
+ continue;
+ if (not sn.isDone(get))
+ return false;
+ if (++i == TARGET_NODES)
+ break;
+ }
+ return true;
+}
+
+bool
+Dht::Search::isAnnounced(Value::Id id) const
+{
+ if (nodes.empty())
+ return false;
+ unsigned i = 0;
+ for (const auto& n : nodes) {
+ if (n.isBad())
+ continue;
+ if (not n.isAnnounced(id))
+ return false;
+ if (++i == TARGET_NODES)
+ return true;
+ }
+ return i;
+}
+
+bool
+Dht::Search::isListening(time_point now) const
+{
+ if (nodes.empty() or listeners.empty())
+ return false;
+ unsigned i = 0;
+ for (const auto& n : nodes) {
+ if (n.isBad())
+ continue;
+ SearchNode::NodeListenerStatus::const_iterator ls {};
+ for (ls = n.listenStatus.begin(); ls != n.listenStatus.end() ; ++ls) {
+ if (n.isListening(now, ls))
+ break;
+ }
+ if (ls == n.listenStatus.end())
+ return false;
+ if (++i == LISTEN_NODES)
+ break;
+ }
+ return i;
+}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Authors: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "securedht.h"
+#include "rng.h"
+
+#include "default_types.h"
+
+extern "C" {
+#include <gnutls/gnutls.h>
+#include <gnutls/abstract.h>
+#include <gnutls/x509.h>
+}
+
+#include <random>
+
+namespace dht {
+
/**
 * Wraps a DhtInterface with crypto support: registers the default (secure and
 * insecure) value types and, when a certificate is provided, permanently
 * publishes it under the certificate's public-key id.
 *
 * Note: both key and certificate in conf.id may be null; when both are set
 * their public-key ids must match.
 */
SecureDht::SecureDht(std::unique_ptr<DhtInterface> dht, SecureDht::Config conf)
: dht_(std::move(dht)), key_(conf.id.first), certificate_(conf.id.second)
{
    if (!dht_) return;
    for (const auto& type : DEFAULT_TYPES)
        registerType(type);

    for (const auto& type : DEFAULT_INSECURE_TYPES)
        registerInsecureType(type);

    registerInsecureType(CERTIFICATE_TYPE);

    if (certificate_) {
        auto certId = certificate_->getPublicKey().getId();
        if (key_ and certId != key_->getPublicKey().getId())
            throw DhtException("SecureDht: provided certificate doesn't match private key.");

        // Permanent put (last argument) so peers can always fetch our certificate.
        dht_->put(certId, Value {
            CERTIFICATE_TYPE,
            *certificate_,
            1
        }, [this](bool ok) {
            if (ok)
                DHT_LOG.DBG("SecureDht: public key announced successfully");
        }, {}, true);
    }
}
+
// Out-of-line empty destructor; members (dht_, key_, certificate_, caches)
// clean themselves up.
SecureDht::~SecureDht()
{}
+
/**
 * Wraps a value type's store/edit policies with signature enforcement.
 *
 * Both lambdas capture 'type' BY VALUE, so they hold a copy carrying the
 * original policies; the wrapped policies run only after the signature
 * checks pass. Signature results are cached on the value
 * (signatureChecked/signatureValid) so verification happens at most once.
 */
ValueType
SecureDht::secureType(ValueType&& type)
{
    type.storePolicy = [this,type](InfoHash id, Sp<Value>& v, const InfoHash& nid, const SockAddr& a) {
        if (v->isSigned()) {
            if (!v->signatureChecked) {
                v->signatureChecked = true;
                v->signatureValid = v->owner and v->owner->checkSignature(v->getToSign(), v->signature);
            }
            if (!v->signatureValid) {
                DHT_LOG.WARN("Signature verification failed");
                return false;
            }
        }
        // Delegate to the original (captured) store policy.
        return type.storePolicy(id, v, nid, a);
    };
    type.editPolicy = [this,type](InfoHash id, const Sp<Value>& o, Sp<Value>& n, const InfoHash& nid, const SockAddr& a) {
        if (!o->isSigned())
            return type.editPolicy(id, o, n, nid, a);
        if (o->owner != n->owner) {
            DHT_LOG.WARN("Edition forbidden: owner changed.");
            return false;
        }
        // The new value must be signed by the OLD value's owner.
        if (!n->signatureChecked) {
            n->signatureChecked = true;
            n->signatureValid = o->owner and o->owner->checkSignature(n->getToSign(), n->signature);
        }
        if (!n->signatureValid) {
            DHT_LOG.WARN("Edition forbidden: signature verification failed.");
            return false;
        }
        if (o->seq == n->seq) {
            // If the data is exactly the same,
            // it can be reannounced, possibly by someone else.
            if (o->getToSign() != n->getToSign()) {
                DHT_LOG.WARN("Edition forbidden: sequence number must be increasing.");
                return false;
            }
        }
        else if (n->seq < o->seq)
            return false;
        return true;
    };
    return type;
}
+
+const Sp<crypto::Certificate>
+SecureDht::getCertificate(const InfoHash& node) const
+{
+ if (node == getId())
+ return certificate_;
+ auto it = nodesCertificates_.find(node);
+ if (it == nodesCertificates_.end())
+ return nullptr;
+ else
+ return it->second;
+}
+
+const Sp<const crypto::PublicKey>
+SecureDht::getPublicKey(const InfoHash& node) const
+{
+ if (node == getId())
+ return std::make_shared<crypto::PublicKey>(certificate_->getPublicKey());
+ auto it = nodesPubKeys_.find(node);
+ if (it == nodesPubKeys_.end())
+ return nullptr;
+ else
+ return it->second;
+}
+
+const Sp<crypto::Certificate>
+SecureDht::registerCertificate(const InfoHash& node, const Blob& data)
+{
+ Sp<crypto::Certificate> crt;
+ try {
+ crt = std::make_shared<crypto::Certificate>(data);
+ } catch (const std::exception& e) {
+ return nullptr;
+ }
+ InfoHash h = crt->getPublicKey().getId();
+ if (node == h) {
+ DHT_LOG.DBG("Registering certificate for %s", h.toString().c_str());
+ auto it = nodesCertificates_.find(h);
+ if (it == nodesCertificates_.end())
+ std::tie(it, std::ignore) = nodesCertificates_.emplace(h, std::move(crt));
+ else
+ it->second = std::move(crt);
+ return it->second;
+ } else {
+ DHT_LOG.WARN("Certificate %s for node %s does not match node id !", h.toString().c_str(), node.toString().c_str());
+ return nullptr;
+ }
+}
+
+void
+SecureDht::registerCertificate(Sp<crypto::Certificate>& cert)
+{
+ if (cert)
+ nodesCertificates_[cert->getId()] = cert;
+}
+
/**
 * Find the certificate of 'node', trying in order: the in-memory cache, the
 * optional local query method, then an asynchronous DHT 'get' filtered on
 * CERTIFICATE_TYPE. 'cb' is invoked exactly once, with null on failure.
 */
void
SecureDht::findCertificate(const InfoHash& node, std::function<void(const Sp<crypto::Certificate>)> cb)
{
    Sp<crypto::Certificate> b = getCertificate(node);
    if (b && *b) {
        DHT_LOG.DBG("Using certificate from cache for %s", node.toString().c_str());
        if (cb)
            cb(b);
        return;
    }
    if (localQueryMethod_) {
        auto res = localQueryMethod_(node);
        if (not res.empty()) {
            DHT_LOG.DBG("Registering certificate from local store for %s", node.toString().c_str());
            nodesCertificates_.emplace(node, res.front());
            if (cb)
                cb(res.front());
            return;
        }
    }

    // 'found' is shared between the value callback and the done callback so
    // 'cb' fires only once even if multiple values arrive.
    auto found = std::make_shared<bool>(false);
    dht_->get(node, [cb,node,found,this](const std::vector<Sp<Value>>& vals) {
        if (*found)
            return false;
        for (const auto& v : vals) {
            if (auto cert = registerCertificate(node, v->data)) {
                *found = true;
                DHT_LOG.DBG("Found certificate for %s", node.toString().c_str());
                if (cb)
                    cb(cert);
                return false; // stop the get: we have what we need
            }
        }
        return true; // keep looking
    }, [cb,found](bool) {
        if (!*found and cb)
            cb(nullptr);
    }, Value::TypeFilter(CERTIFICATE_TYPE));
}
+
/**
 * Find the public key of 'node': first from the public-key cache, otherwise
 * by resolving the node's certificate (findCertificate) and extracting the
 * key from it. 'cb' is invoked exactly once, with null on failure.
 */
void
SecureDht::findPublicKey(const InfoHash& node, std::function<void(const Sp<const crypto::PublicKey>)> cb)
{
    auto pk = getPublicKey(node);
    if (pk && *pk) {
        DHT_LOG.DBG("Found public key from cache for %s", node.toString().c_str());
        if (cb)
            cb(pk);
        return;
    }
    findCertificate(node, [=](const Sp<crypto::Certificate> crt) {
        if (crt && *crt) {
            auto pk = std::make_shared<crypto::PublicKey>(crt->getPublicKey());
            if (*pk) {
                // Cache for future lookups before reporting success.
                nodesPubKeys_[pk->getId()] = pk;
                if (cb) cb(pk);
                return;
            }
        }
        if (cb) cb(nullptr);
    });
}
+
/**
 * Validate an incoming value before exposing it to callbacks.
 *
 * - Encrypted values: decrypted with our private key (result cached on the
 *   value via 'decrypted'/'decryptedValue'); values we cannot decrypt or
 *   that are addressed to someone else yield an empty pointer.
 * - Signed values: signature verified once (cached via 'signatureChecked'/
 *   'signatureValid'); invalid signatures yield an empty pointer.
 * - Plain values: forwarded unchanged.
 *
 * Owners of successfully verified values are added to the public-key cache.
 */
Sp<Value>
SecureDht::checkValue(const Sp<Value>& v)
{
    // Decrypt encrypted values
    if (v->isEncrypted()) {
        if (not key_) {
#if OPENDHT_PROXY_SERVER
            if (forward_all_) // We are currently a proxy, send messages to clients.
                return v;
#endif
            return {};
        }
        if (v->decrypted) {
            return v->decryptedValue;
        }
        // Mark as processed before attempting, so a failed decryption is
        // not retried for the same value.
        v->decrypted = true;
        try {
            Value decrypted_val (decrypt(*v));
            if (decrypted_val.recipient == getId()) {
                if (decrypted_val.owner)
                    nodesPubKeys_[decrypted_val.owner->getId()] = decrypted_val.owner;
                v->decryptedValue = std::make_shared<Value>(std::move(decrypted_val));
                return v->decryptedValue;
            }
            // Ignore values belonging to other people
        } catch (const std::exception& e) {
            DHT_LOG.WARN("Could not decrypt value %s : %s", v->toString().c_str(), e.what());
        }
    }
    // Check signed values
    else if (v->isSigned()) {
        if (v->signatureChecked) {
            return v->signatureValid ? v : Sp<Value>{};
        }
        v->signatureChecked = true;
        if (v->owner and v->owner->checkSignature(v->getToSign(), v->signature)) {
            v->signatureValid = true;
            nodesPubKeys_[v->owner->getId()] = v->owner;
            return v;
        }
        else
            DHT_LOG.WARN("Signature verification failed for %s", v->toString().c_str());
    }
    // Forward normal values
    else {
        return v;
    }
    return {};
}
+
+ValueCallback
+SecureDht::getCallbackFilter(ValueCallback cb, Value::Filter&& filter)
+{
+ return [=](const std::vector<Sp<Value>>& values, bool expired) {
+ std::vector<Sp<Value>> tmpvals {};
+ for (const auto& v : values) {
+ if (auto nv = checkValue(v))
+ if (not filter or filter(*nv))
+ tmpvals.emplace_back(std::move(nv));
+ }
+ if (cb and not tmpvals.empty())
+ return cb(tmpvals, expired);
+ return true;
+ };
+}
+
+
+GetCallback
+SecureDht::getCallbackFilter(GetCallback cb, Value::Filter&& filter)
+{
+ return [=](const std::vector<Sp<Value>>& values) {
+ std::vector<Sp<Value>> tmpvals {};
+ for (const auto& v : values) {
+ if (auto nv = checkValue(v))
+ if (not filter or filter(*nv))
+ tmpvals.emplace_back(std::move(nv));
+ }
+ if (cb and not tmpvals.empty())
+ return cb(tmpvals);
+ return true;
+ };
+}
+
/**
 * Forward a 'get' to the wrapped DHT, with the callback wrapped to decrypt
 * and verify values (see getCallbackFilter) before they reach 'cb'.
 */
void
SecureDht::get(const InfoHash& id, GetCallback cb, DoneCallback donecb, Value::Filter&& f, Where&& w)
{
    dht_->get(id, getCallbackFilter(cb, std::forward<Value::Filter>(f)), donecb, {}, std::forward<Where>(w));
}
+
/**
 * Forward a 'listen' (value-callback form) to the wrapped DHT, with the
 * callback wrapped to decrypt/verify values first. Returns the listen token.
 */
size_t
SecureDht::listen(const InfoHash& id, ValueCallback cb, Value::Filter f, Where w)
{
    return dht_->listen(id, getCallbackFilter(cb, std::forward<Value::Filter>(f)), {}, std::forward<Where>(w));
}
+
+
/**
 * Forward a 'listen' (get-callback form) to the wrapped DHT, with the
 * callback wrapped to decrypt/verify values first. Returns the listen token.
 */
size_t
SecureDht::listen(const InfoHash& id, GetCallback cb, Value::Filter f, Where w)
{
    return dht_->listen(id, getCallbackFilter(cb, std::forward<Value::Filter>(f)), {}, std::forward<Where>(w));
}
+
/**
 * Sign 'val' with our key and put it at 'hash'.
 *
 * Assigns a random value id when unset, then bumps the sequence number past
 * any value we are already announcing locally and past any matching signed
 * value already published on the DHT (queried by value id), before signing
 * and putting. 'callback' is forwarded to the final put.
 */
void
SecureDht::putSigned(const InfoHash& hash, Sp<Value> val, DoneCallback callback, bool permanent)
{
    if (val->id == Value::INVALID_ID) {
        crypto::random_device rdev;
        std::uniform_int_distribution<Value::Id> rand_id;
        val->id = rand_id(rdev);
    }

    // Check if we are already announcing a value
    auto p = dht_->getPut(hash, val->id);
    if (p && val->seq <= p->seq) {
        val->seq = p->seq + 1;
    }

    // Check if data already exists on the dht
    get(hash,
        [val,this] (const std::vector<Sp<Value>>& vals) {
            DHT_LOG.DBG("Found online previous value being announced.");
            for (const auto& v : vals) {
                if (!v->isSigned())
                    DHT_LOG.ERR("Existing non-signed value seems to exists at this location.");
                else if (not v->owner or v->owner->getId() != getId())
                    DHT_LOG.ERR("Existing signed value belonging to someone else seems to exists at this location.");
                else if (val->seq <= v->seq)
                    val->seq = v->seq + 1;
            }
            return true;
        },
        [hash,val,this,callback,permanent] (bool /* ok */) {
            // Sign and publish once the sequence number is settled.
            sign(*val);
            dht_->put(hash, val, callback, time_point::max(), permanent);
        },
        Value::IdFilter(val->id),
        std::move(Where().id(val->id))
    );
}
+
/**
 * Encrypt 'val' for node 'to' and put the result at 'hash'.
 *
 * Resolves the recipient's public key first (findPublicKey); if the key
 * cannot be found or encryption throws, 'callback' is invoked with failure.
 */
void
SecureDht::putEncrypted(const InfoHash& hash, const InfoHash& to, Sp<Value> val, DoneCallback callback, bool permanent)
{
    findPublicKey(to, [=](const Sp<const crypto::PublicKey>& pk) {
        if(!pk || !*pk) {
            if (callback)
                callback(false, {});
            return;
        }
        DHT_LOG.WARN("Encrypting data for PK: %s", pk->getId().toString().c_str());
        try {
            dht_->put(hash, encrypt(*val, *pk), callback, time_point::max(), permanent);
        } catch (const std::exception& e) {
            DHT_LOG.ERR("Error putting encrypted data: %s", e.what());
            if (callback)
                callback(false, {});
        }
    });
}
+
/**
 * Sign 'v' with our private key.
 * NOTE(review): key_ is dereferenced unchecked here although the constructor
 * allows a null key — callers appear responsible for ensuring a key is
 * configured; confirm before calling without one.
 */
void
SecureDht::sign(Value& v) const
{
    v.sign(*key_);
}
+
/**
 * Encrypt 'v' for recipient public key 'to', signing with our private key.
 * NOTE(review): key_ is dereferenced unchecked — same caveat as sign().
 */
Value
SecureDht::encrypt(Value& v, const crypto::PublicKey& to) const
{
    return v.encrypt(*key_, to);
}
+
/**
 * Decrypt an encrypted value with our private key and verify it.
 *
 * @throws DhtException if 'v' is not encrypted.
 * @throws crypto::DecryptError if the decrypted value is not addressed to us
 *         or its owner signature does not verify.
 */
Value
SecureDht::decrypt(const Value& v)
{
    if (not v.isEncrypted())
        throw DhtException("Data is not encrypted.");

    auto decrypted = key_->decrypt(v.cypher);

    // Rebuild the inner Value from the decrypted msgpack body,
    // keeping the outer value's id.
    Value ret {v.id};
    auto msg = msgpack::unpack((const char*)decrypted.data(), decrypted.size());
    ret.msgpack_unpack_body(msg.get());

    if (ret.recipient != getId())
        throw crypto::DecryptError("Recipient mismatch");
    if (not ret.owner or not ret.owner->checkSignature(ret.getToSign(), ret.signature))
        throw crypto::DecryptError("Signature mismatch");

    return ret;
}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "infohash.h"
+#include "value.h"
+#include "listener.h"
+
+#include <map>
+#include <utility>
+
+namespace dht {
+
+/**
+ * Tracks storage usage per IP or IP range
+ */
+class StorageBucket {
+public:
+ void insert(const InfoHash& id, const Value& value, time_point expiration) {
+ totalSize_ += value.size();
+ storedValues_.emplace(expiration, std::pair<InfoHash, Value::Id>(id, value.id));
+ }
+ void erase(const InfoHash& id, const Value& value, time_point expiration) {
+ auto size = value.size();
+ totalSize_ -= size;
+ auto range = storedValues_.equal_range(expiration);
+ for (auto rit = range.first; rit != range.second;) {
+ if (rit->second.first == id && rit->second.second == value.id) {
+ storedValues_.erase(rit);
+ break;
+ } else
+ ++rit;
+ }
+ }
+ size_t size() const { return totalSize_; }
+ std::pair<InfoHash, Value::Id> getOldest() const { return storedValues_.begin()->second; }
+private:
+ std::multimap<time_point, std::pair<InfoHash, Value::Id>> storedValues_;
+ size_t totalSize_ {0};
+};
+
/**
 * A single value held in a Storage entry, together with its lifetime bounds
 * and (optionally) the quota bucket it is accounted against.
 */
struct ValueStorage {
    Sp<Value> data {};
    // When the value was stored or last refreshed.
    time_point created {};
    // When the value expires and becomes eligible for removal.
    time_point expiration {};
    // Quota bucket charged for this value; nullptr when no quota tracking applies.
    StorageBucket* store_bucket {nullptr};

    ValueStorage() {}
    ValueStorage(const Sp<Value>& v, time_point t, time_point e)
        : data(v), created(t), expiration(e) {}
};
+
+
/**
 * Everything the node stores under one info-hash: the values themselves
 * plus the remote and local listeners watching that hash.
 */
struct Storage {
    // Next time this hash needs expiration/maintenance work.
    time_point maintenance_time {};
    // Remote listeners, grouped per node, keyed by listen token.
    std::map<Sp<Node>, std::map<size_t, Listener>> listeners;
    // Listeners registered locally on this node.
    std::map<size_t, LocalListener> local_listeners {};
    // Token to hand out to the next local listener.
    size_t listener_token {1};

    /* The maximum number of values we store for a given hash. */
    static constexpr unsigned MAX_VALUES {1024};

    /**
     * Changes caused by an operation on the storage.
     */
    struct StoreDiff {
        /** Difference in stored size caused by the op */
        ssize_t size_diff;
        /** Difference in number of values */
        ssize_t values_diff;
        /** Difference in number of listeners */
        ssize_t listeners_diff;
    };

    Storage() {}
    Storage(time_point t) : maintenance_time(t) {}

#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ <= 9 || defined(_WIN32)
    // GCC-bug: remove me when support of GCC < 4.9.2 is abandoned
    Storage(Storage&& o) noexcept
        : maintenance_time(std::move(o.maintenance_time))
        , listeners(std::move(o.listeners))
        , local_listeners(std::move(o.local_listeners))
        , listener_token(std::move(o.listener_token))
        , values(std::move(o.values))
        , total_size(std::move(o.total_size)) {}
#else
    Storage(Storage&& o) noexcept = default;
#endif

    Storage& operator=(Storage&& o) = default;

    // True when no value is stored (listeners are not considered).
    bool empty() const {
        return values.empty();
    }

    // Drops every stored value; returns the resulting (negative) diff.
    StoreDiff clear();

    size_t valueCount() const {
        return values.size();
    }

    // Sum of the sizes of all stored values, in bytes.
    size_t totalSize() const {
        return total_size;
    }

    const std::vector<ValueStorage>& getValues() const { return values; }

    // Linear search for a value by id; returns an empty pointer when absent.
    Sp<Value> getById(Value::Id vid) const {
        for (auto& v : values)
            if (v.data->id == vid) return v.data;
        return {};
    }

    // Returns the stored values matching the filter (all of them when no filter).
    std::vector<Sp<Value>> get(Value::Filter f = {}) const {
        std::vector<Sp<Value>> newvals {};
        if (not f) newvals.reserve(values.size());
        for (auto& v : values) {
            if (not f || f(*v.data))
                newvals.push_back(v.data);
        }
        return newvals;
    }

    /**
     * Stores a new value in this storage, or replace a previous value
     *
     * @return <storage, change_size, change_value_num>
     *      storage: set if a change happened
     *      change_size: size difference
     *      change_value_num: change of value number (0 or 1)
     */
    std::pair<ValueStorage*, StoreDiff>
    store(const InfoHash& id, const Sp<Value>&, time_point created, time_point expiration, StorageBucket*);

    /**
     * Refreshes the time point of the value's lifetime begining.
     *
     * @param now  The reference to now
     * @param vid  The value id
     * @return time of the next expiration, time_point::max() if no expiration
     */
    time_point refresh(const time_point& now, const Value::Id& vid, const TypeStore& types) {
        for (auto& vs : values)
            if (vs.data->id == vid) {
                vs.created = now;
                // Never shortens an already-later expiration.
                vs.expiration = std::max(vs.expiration, now + types.getType(vs.data->type).expiration);
                return vs.expiration;
            }
        return time_point::max();
    }

    // Removes the value `vid`; returns the (negative) diff, or zeros when absent.
    StoreDiff remove(const InfoHash& id, Value::Id);

    // Drops expired listeners and values; returns the size diff and the expired values.
    std::pair<ssize_t, std::vector<Sp<Value>>> expire(const InfoHash& id, time_point now);

private:
    Storage(const Storage&) = delete;
    Storage& operator=(const Storage&) = delete;

    std::vector<ValueStorage> values {};
    size_t total_size {};
};
+
+
+std::pair<ValueStorage*, Storage::StoreDiff>
+Storage::store(const InfoHash& id, const Sp<Value>& value, time_point created, time_point expiration, StorageBucket* sb)
+{
+ auto it = std::find_if (values.begin(), values.end(), [&](const ValueStorage& vr) {
+ return vr.data == value || vr.data->id == value->id;
+ });
+ ssize_t size_new = value->size();
+ if (it != values.end()) {
+ /* Already there, only need to refresh */
+ it->created = created;
+ size_t size_old = it->data->size();
+ ssize_t size_diff = size_new - (ssize_t)size_old;
+ if (it->data != value) {
+ //DHT_LOG.DEBUG("Updating %s -> %s", id.toString().c_str(), value->toString().c_str());
+ // clear quota for previous value
+ if (it->store_bucket)
+ it->store_bucket->erase(id, *value, it->expiration);
+ it->expiration = expiration;
+ // update quota for new value
+ it->store_bucket = sb;
+ if (sb)
+ sb->insert(id, *value, expiration);
+ it->data = value;
+ total_size += size_diff;
+ return std::make_pair(&(*it), StoreDiff{size_diff, 0, 0});
+ }
+ return std::make_pair(nullptr, StoreDiff{});
+ } else {
+ //DHT_LOG.DEBUG("Storing %s -> %s", id.toString().c_str(), value->toString().c_str());
+ if (values.size() < MAX_VALUES) {
+ total_size += size_new;
+ values.emplace_back(value, created, expiration);
+ values.back().store_bucket = sb;
+ if (sb)
+ sb->insert(id, *value, expiration);
+ return std::make_pair(&values.back(), StoreDiff{size_new, 1, 0});
+ }
+ return std::make_pair(nullptr, StoreDiff{});
+ }
+}
+
+Storage::StoreDiff
+Storage::remove(const InfoHash& id, Value::Id vid)
+{
+ auto it = std::find_if (values.begin(), values.end(), [&](const ValueStorage& vr) {
+ return vr.data->id == vid;
+ });
+ if (it == values.end())
+ return {};
+ ssize_t size = it->data->size();
+ if (it->store_bucket)
+ it->store_bucket->erase(id, *it->data, it->expiration);
+ total_size -= size;
+ values.erase(it);
+ return {-size, -1, 0};
+}
+
+Storage::StoreDiff
+Storage::clear()
+{
+ ssize_t num_values = values.size();
+ ssize_t tot_size = total_size;
+ values.clear();
+ total_size = 0;
+ return {-tot_size, -num_values, 0};
+}
+
/**
 * Removes expired remote listeners and expired values.
 * @return the (negative) size diff and the list of expired values.
 */
std::pair<ssize_t, std::vector<Sp<Value>>>
Storage::expire(const InfoHash& id, time_point now)
{
    // expire listeners: drop every remote listener that has not refreshed
    // within NODE_EXPIRE_TIME, then forget nodes left with no listener.
    ssize_t del_listen {0};
    // NOTE(review): del_listen is decremented below but never read or
    // returned — StoreDiff has a listeners_diff field it may have been
    // meant for. Confirm intent.
    for (auto nl_it = listeners.begin(); nl_it != listeners.end();) {
        auto& node_listeners = nl_it->second;
        for (auto l = node_listeners.cbegin(); l != node_listeners.cend();) {
            bool expired = l->second.time + Node::NODE_EXPIRE_TIME < now;
            if (expired)
                l = node_listeners.erase(l);
            else
                ++l;
        }
        if (node_listeners.empty()) {
            nl_it = listeners.erase(nl_it);
            del_listen--;
        }
        else
            ++nl_it;
    }

    // expire values: partition still-valid values to the front, then
    // collect, un-account and erase the expired tail.
    auto r = std::partition(values.begin(), values.end(), [&](const ValueStorage& v) {
        return v.expiration > now;
    });
    std::vector<Sp<Value>> ret;
    ret.reserve(std::distance(r, values.end()));
    ssize_t size_diff {};
    std::for_each(r, values.end(), [&](const ValueStorage& v) {
        size_diff -= v.data->size();
        // Give the quota back to the value's bucket, if any.
        if (v.store_bucket)
            v.store_bucket->erase(id, *v.data, v.expiration);
        // NOTE(review): v is const here, so std::move degrades to a copy.
        ret.emplace_back(std::move(v.data));
    });
    total_size += size_diff;
    values.erase(r, values.end());
    return {size_diff, std::move(ret)};
}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "utils.h"
+#include "sockaddr.h"
+#include "default_types.h"
+
+/* An IPv4 equivalent to IN6_IS_ADDR_UNSPECIFIED */
+#ifndef IN_IS_ADDR_UNSPECIFIED
+#define IN_IS_ADDR_UNSPECIFIED(a) (((long int) (a)->s_addr) == 0x00000000)
+#endif /* IN_IS_ADDR_UNSPECIFIED */
+
+namespace dht {
+
/* The 96-bit prefix of an IPv4-mapped IPv6 address (::ffff:0:0/96, RFC 4291). */
static constexpr std::array<uint8_t, 12> MAPPED_IPV4_PREFIX {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}};
+
/**
 * Splits a "host:port" string into its host and service parts.
 * Handles bracketed IPv6 literals ("[::1]:4222") and bare IPv6 addresses
 * (multiple colons, no brackets → no port). The port part is empty when
 * absent.
 */
std::pair<std::string, std::string>
splitPort(const std::string& s) {
    if (s.empty())
        return {};
    if (s.front() == '[') {
        // Bracketed IPv6 literal: "[host]" or "[host]:port".
        const auto closure = s.find_first_of(']');
        const auto colon = s.find_last_of(':');
        if (closure == std::string::npos)
            return {s, ""};
        if (colon == std::string::npos or colon < closure)
            return {s.substr(1, closure - 1), ""};
        return {s.substr(1, closure - 1), s.substr(colon + 1)};
    }
    const auto last_colon = s.find_last_of(':');
    const auto first_colon = s.find_first_of(':');
    // Exactly one colon separates host and port; several colons mean an
    // unbracketed IPv6 address with no port.
    if (last_colon == std::string::npos or last_colon != first_colon)
        return {s, ""};
    return {s.substr(0, last_colon), s.substr(last_colon + 1)};
}
+
+std::vector<SockAddr>
+SockAddr::resolve(const std::string& host, const std::string& service)
+{
+ std::vector<SockAddr> ips {};
+ if (host.empty())
+ return ips;
+
+ addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_socktype = SOCK_DGRAM;
+ addrinfo* info = nullptr;
+ int rc = getaddrinfo(host.c_str(), service.empty() ? nullptr : service.c_str(), &hints, &info);
+ if(rc != 0)
+ throw std::invalid_argument(std::string("Error: `") + host + ":" + service + "`: " + gai_strerror(rc));
+
+ addrinfo* infop = info;
+ while (infop) {
+ ips.emplace_back(infop->ai_addr, infop->ai_addrlen);
+ infop = infop->ai_next;
+ }
+ freeaddrinfo(info);
+ return ips;
+}
+
+
/**
 * Formats a socket address as "host:port" (IPv6 hosts are bracketed).
 * A zero port is omitted; an unparsable address yields "[invalid address]".
 */
std::string
print_addr(const sockaddr* sa, socklen_t slen)
{
    char host[NI_MAXHOST];
    char service[NI_MAXSERV];
    std::stringstream out;
    int err = getnameinfo(sa, slen, host, sizeof(host), service, sizeof(service),
                          NI_NUMERICHOST | NI_NUMERICSERV);
    if (err) {
        out << "[invalid address]";
    } else {
        // Bracket IPv6 hosts so the port separator stays unambiguous.
        if (sa->sa_family == AF_INET6)
            out << '[' << host << ']';
        else
            out << host;
        // getnameinfo renders an unset port as "0": skip it.
        if (std::strcmp(service, "0") != 0)
            out << ':' << service;
    }
    return out.str();
}
+
/** Convenience overload of print_addr for sockaddr_storage. */
std::string
print_addr(const sockaddr_storage& ss, socklen_t sslen)
{
    return print_addr((const sockaddr*)&ss, sslen);
}
+
+bool
+SockAddr::isUnspecified() const
+{
+ switch (getFamily()) {
+ case AF_INET:
+ return IN_IS_ADDR_UNSPECIFIED(&getIPv4().sin_addr);
+ case AF_INET6:
+ return IN6_IS_ADDR_UNSPECIFIED(reinterpret_cast<const in6_addr*>(&getIPv6().sin6_addr));
+ default:
+ return true;
+ }
+}
+
+bool
+SockAddr::isLoopback() const
+{
+ switch (getFamily()) {
+ case AF_INET: {
+ auto addr_host = ntohl(getIPv4().sin_addr.s_addr);
+ uint8_t b1 = (uint8_t)(addr_host >> 24);
+ return b1 == 127;
+ }
+ case AF_INET6:
+ return IN6_IS_ADDR_LOOPBACK(reinterpret_cast<const in6_addr*>(&getIPv6().sin6_addr));
+ default:
+ return false;
+ }
+}
+
+bool
+SockAddr::isPrivate() const
+{
+ if (isLoopback()) {
+ return true;
+ }
+ switch (getFamily()) {
+ case AF_INET: {
+ auto addr_host = ntohl(getIPv4().sin_addr.s_addr);
+ uint8_t b1, b2;
+ b1 = (uint8_t)(addr_host >> 24);
+ b2 = (uint8_t)((addr_host >> 16) & 0x0ff);
+ // 10.x.y.z
+ if (b1 == 10)
+ return true;
+ // 172.16.0.0 - 172.31.255.255
+ if ((b1 == 172) && (b2 >= 16) && (b2 <= 31))
+ return true;
+ // 192.168.0.0 - 192.168.255.255
+ if ((b1 == 192) && (b2 == 168))
+ return true;
+ return false;
+ }
+ case AF_INET6: {
+ const uint8_t* addr6 = reinterpret_cast<const uint8_t*>(&getIPv6().sin6_addr);
+ if (addr6[0] == 0xfc)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+bool
+SockAddr::isMappedIPv4() const
+{
+ if (getFamily() != AF_INET6)
+ return false;
+ const uint8_t* addr6 = reinterpret_cast<const uint8_t*>(&getIPv6().sin6_addr);
+ return std::equal(MAPPED_IPV4_PREFIX.begin(), MAPPED_IPV4_PREFIX.end(), addr6);
+}
+
+SockAddr
+SockAddr::getMappedIPv4() const
+{
+ if (not isMappedIPv4())
+ return *this;
+ SockAddr ret;
+ ret.setFamily(AF_INET);
+ ret.setPort(getPort());
+ auto addr6 = reinterpret_cast<const uint8_t*>(&getIPv6().sin6_addr);
+ auto addr4 = reinterpret_cast<uint8_t*>(&ret.getIPv4().sin_addr);
+ addr6 += MAPPED_IPV4_PREFIX.size();
+ std::copy_n(addr6, sizeof(in_addr), addr4);
+ return ret;
+}
+
/** Equality of socket addresses, delegating to SockAddr::equals(). */
bool operator==(const SockAddr& a, const SockAddr& b) {
    return a.equals(b);
}
+
+time_point from_time_t(std::time_t t) {
+ return clock::now() + (std::chrono::system_clock::from_time_t(t) - std::chrono::system_clock::now());
+}
+
+std::time_t to_time_t(time_point t) {
+ return std::chrono::system_clock::to_time_t(
+ std::chrono::system_clock::now() +
+ std::chrono::duration_cast<std::chrono::system_clock::duration>(t - clock::now()));
+}
+
+Blob
+unpackBlob(msgpack::object& o) {
+ switch (o.type) {
+ case msgpack::type::BIN:
+ return {o.via.bin.ptr, o.via.bin.ptr+o.via.bin.size};
+ case msgpack::type::STR:
+ return {o.via.str.ptr, o.via.str.ptr+o.via.str.size};
+ case msgpack::type::ARRAY: {
+ Blob ret(o.via.array.size);
+ std::transform(o.via.array.ptr, o.via.array.ptr+o.via.array.size, ret.begin(), [](const msgpack::object& b) {
+ return b.as<uint8_t>();
+ });
+ return ret;
+ }
+ default:
+ throw msgpack::type_error();
+ }
+}
+
/** Parses a blob as a single msgpack object. */
msgpack::unpacked
unpackMsg(Blob b) {
    return msgpack::unpack((const char*)b.data(), b.size());
}
+
+msgpack::object*
+findMapValue(msgpack::object& map, const std::string& key) {
+ if (map.type != msgpack::type::MAP) throw msgpack::type_error();
+ for (unsigned i = 0; i < map.via.map.size; i++) {
+ auto& o = map.via.map.ptr[i];
+ if (o.key.type == msgpack::type::STR && o.key.as<std::string>() == key)
+ return &o.val;
+ }
+ return nullptr;
+}
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "value.h"
+
+#include "default_types.h"
+#include "securedht.h" // print certificate ID
+
+#ifdef OPENDHT_JSONCPP
+#include "base64.h"
+#endif
+
+
+namespace dht {
+
/* Message prefix used in exceptions thrown while parsing query strings. */
const std::string Query::QUERY_PARSE_ERROR {"Error parsing query."};
+
+Value::Filter bindFilterRaw(FilterRaw raw_filter, void* user_data) {
+ if (not raw_filter) return {};
+ return [=](const Value& value) {
+ return raw_filter(value, user_data);
+ };
+}
+
/**
 * Human-readable dump of a Value for logging: id, security state
 * (encrypted/signed/decrypted), then a type-aware rendering of the payload.
 * The stream's formatting flags are restored before returning.
 */
std::ostream& operator<< (std::ostream& s, const Value& v)
{
    auto flags(s.flags());
    s << "Value[id:" << std::hex << v.id << std::dec << " ";
    if (v.isEncrypted())
        s << "encrypted ";
    else if (v.isSigned()) {
        s << "signed (v" << v.seq << ") ";
        // A recipient on a non-encrypted signed value means it was decrypted.
        if (v.recipient)
            s << "decrypted ";
    }
    if (not v.isEncrypted()) {
        if (v.type == IpServiceAnnouncement::TYPE.id) {
            s << IpServiceAnnouncement(v.data);
        } else if (v.type == CERTIFICATE_TYPE.id) {
            s << "Certificate";
#ifdef OPENDHT_LOG_CRT_ID
            try {
                auto h = crypto::Certificate(v.data).getPublicKey().getLongId();
                s << " with ID " << h;
            } catch (const std::exception& e) {
                s << " (invalid)";
            }
#endif
        } else {
            // Unknown type: hex dump of the raw payload.
            s << "Data (type: " << v.type << " ): ";
            s << std::hex;
            for (size_t i=0; i<v.data.size(); i++)
                s << std::setfill('0') << std::setw(2) << (unsigned)v.data[i];
            s << std::dec;
        }
    }
    s << "]";
    s.flags(flags);
    return s;
}
+
/* Built-in default value type (id 0). */
const ValueType ValueType::USER_DATA = {0, "User Data"};
+
/** Default storage policy: accept any value not exceeding MAX_VALUE_SIZE. */
bool
ValueType::DEFAULT_STORE_POLICY(InfoHash, std::shared_ptr<Value>& v, const InfoHash&, const SockAddr&)
{
    return v->size() <= MAX_VALUE_SIZE;
}
+
+msgpack::object*
+findMapValue(const msgpack::object& map, const std::string& key) {
+ if (map.type != msgpack::type::MAP) throw msgpack::type_error();
+ for (unsigned i = 0; i < map.via.map.size; i++) {
+ auto& o = map.via.map.ptr[i];
+ if(o.key.type != msgpack::type::STR)
+ continue;
+ if (o.key.as<std::string>() == key) {
+ return &o.val;
+ }
+ }
+ return nullptr;
+}
+
/** Combined size in bytes of the value's variable-length fields. */
size_t
Value::size() const
{
    return cypher.size() + data.size() + signature.size() + user_type.size();
}
+
+void
+Value::msgpack_unpack(msgpack::object o)
+{
+ if (o.type != msgpack::type::MAP) throw msgpack::type_error();
+ if (o.via.map.size < 2) throw msgpack::type_error();
+
+ if (auto rid = findMapValue(o, "id")) {
+ id = rid->as<Id>();
+ } else
+ throw msgpack::type_error();
+
+ if (auto rdat = findMapValue(o, "dat")) {
+ msgpack_unpack_body(*rdat);
+ } else
+ throw msgpack::type_error();
+}
+
/**
 * Unpacks the value body (the "dat" part of the wire format).
 * A BIN object is an encrypted value: the raw bytes go to `cypher`.
 * A MAP is a plain value: {"body": {data, type, [utype], [owner, seq, [to]]}}
 * plus a top-level "sig" for signed values.
 * @throws msgpack::type_error on a malformed object.
 */
void
Value::msgpack_unpack_body(const msgpack::object& o)
{
    // Reset fields so a previous state doesn't leak into this value.
    // NOTE(review): user_type is NOT reset here — a body lacking "utype"
    // keeps any previously-set user type. Confirm whether intended.
    owner = {};
    recipient = {};
    cypher.clear();
    signature.clear();
    data.clear();
    type = 0;

    if (o.type == msgpack::type::BIN) {
        // Encrypted value: keep the raw cyphertext only.
        auto dat = o.as<std::vector<char>>();
        cypher = {dat.begin(), dat.end()};
    } else {
        if (o.type != msgpack::type::MAP)
            throw msgpack::type_error();
        auto rbody = findMapValue(o, "body");
        if (not rbody)
            throw msgpack::type_error();

        if (auto rdata = findMapValue(*rbody, "data")) {
            data = unpackBlob(*rdata);
        } else
            throw msgpack::type_error();

        if (auto rtype = findMapValue(*rbody, "type")) {
            type = rtype->as<ValueType::Id>();
        } else
            throw msgpack::type_error();

        if (auto rutype = findMapValue(*rbody, "utype")) {
            user_type = rutype->as<std::string>();
        }

        // Signed value: an owner implies a sequence number and a signature.
        if (auto rowner = findMapValue(*rbody, "owner")) {
            if (auto rseq = findMapValue(*rbody, "seq"))
                seq = rseq->as<decltype(seq)>();
            else
                throw msgpack::type_error();
            crypto::PublicKey new_owner;
            new_owner.msgpack_unpack(*rowner);
            owner = std::make_shared<const crypto::PublicKey>(std::move(new_owner));
            if (auto rrecipient = findMapValue(*rbody, "to")) {
                recipient = rrecipient->as<InfoHash>();
            }

            // The signature lives on the outer object `o`, not inside "body",
            // and is only required when an owner is present.
            if (auto rsig = findMapValue(o, "sig")) {
                signature = unpackBlob(*rsig);
            } else
                throw msgpack::type_error();
        }
    }
}
+
+#ifdef OPENDHT_JSONCPP
+Value::Value(Json::Value& json)
+{
+ id = Value::Id(unpackId(json, "id"));
+ if (json.isMember("cypher")) {
+ auto cypherStr = json["cypher"].asString();
+ cypherStr = base64_decode(cypherStr);
+ cypher = std::vector<unsigned char>(cypherStr.begin(), cypherStr.end());
+ }
+ if (json.isMember("sig")) {
+ auto sigStr = json["sig"].asString();
+ sigStr = base64_decode(sigStr);
+ signature = std::vector<unsigned char>(sigStr.begin(), sigStr.end());
+ }
+ if (json.isMember("seq"))
+ seq = json["seq"].asInt();
+ if (json.isMember("owner")) {
+ auto ownerStr = json["owner"].asString();
+ auto ownerBlob = std::vector<unsigned char>(ownerStr.begin(), ownerStr.end());
+ owner = std::make_shared<const crypto::PublicKey>(ownerBlob);
+ }
+ if (json.isMember("to")) {
+ auto toStr = json["to"].asString();
+ recipient = InfoHash(toStr);
+ }
+ if (json.isMember("type"))
+ type = json["type"].asInt();
+ if (json.isMember("data")){
+ auto dataStr = json["data"].asString();
+ dataStr = base64_decode(dataStr);
+ data = std::vector<unsigned char>(dataStr.begin(), dataStr.end());
+ }
+ if (json.isMember("utype"))
+ user_type = json["utype"].asString();
+}
+
/**
 * Serializes this value to its JSON (REST proxy) representation.
 * Encrypted values expose only "id" and the base64 "cypher"; plain values
 * expose type/data plus signature and owner information when signed.
 */
Json::Value
Value::toJson() const
{
    Json::Value val;
    val["id"] = std::to_string(id);
    if (isEncrypted()) {
        val["cypher"] = base64_encode(cypher);
    } else {
        if (isSigned())
            val["sig"] = base64_encode(signature);
        bool has_owner = owner && *owner;
        if (has_owner) { // isSigned
            val["seq"] = seq;
            val["owner"] = owner->toString();
            if (recipient)
                val["to"] = recipient.toString();
        }
        val["type"] = type;
        val["data"] = base64_encode(data);
        if (not user_type.empty())
            val["utype"] = user_type;
    }
    return val;
}
+
+uint64_t
+unpackId(const Json::Value& json, const std::string& key) {
+ uint64_t ret = 0;
+ try {
+ if (json.isMember(key)) {
+ const auto& t = json[key];
+ if (t.isString()) {
+ ret = std::stoull(t.asString());
+ } else {
+ ret = t.asLargestUInt();
+ }
+ }
+ } catch (...) {}
+ return ret;
+}
+#endif
+
+bool
+FieldValue::operator==(const FieldValue& vfd) const
+{
+ if (field != vfd.field)
+ return false;
+ switch (field) {
+ case Value::Field::Id:
+ case Value::Field::ValueType:
+ case Value::Field::SeqNum:
+ return intValue == vfd.intValue;
+ case Value::Field::OwnerPk:
+ return hashValue == vfd.hashValue;
+ case Value::Field::UserType:
+ return blobValue == vfd.blobValue;
+ case Value::Field::None:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Value::Filter
+FieldValue::getLocalFilter() const
+{
+ switch (field) {
+ case Value::Field::Id:
+ return Value::IdFilter(intValue);
+ case Value::Field::ValueType:
+ return Value::TypeFilter(intValue);
+ case Value::Field::OwnerPk:
+ return Value::OwnerFilter(hashValue);
+ case Value::Field::SeqNum:
+ return Value::SeqNumFilter(intValue);
+ case Value::Field::UserType:
+ return Value::UserTypeFilter(std::string {blobValue.begin(), blobValue.end()});
+ default:
+ return Value::AllFilter();
+ }
+}
+
/**
 * Builds an index of a value's queryable fields.
 * With a non-empty selection only the selected fields are indexed;
 * otherwise every field (except None) is.
 */
FieldValueIndex::FieldValueIndex(const Value& v, Select s)
{
    auto selection = s.getSelection();
    if (not selection.empty()) {
        // Pre-seed the index with empty entries for each selected field.
        std::transform(selection.begin(), selection.end(), std::inserter(index, index.end()),
            [](const std::set<Value::Field>::value_type& f) {
                return std::make_pair(f, FieldValue {});
            });
    } else {
        index.clear();
        // Start at 1 to skip Value::Field::None.
        for (size_t f = 1 ; f < static_cast<int>(Value::Field::COUNT) ; ++f)
            index[static_cast<Value::Field>(f)] = {};
    }
    // Fill each pre-seeded entry from the value.
    for (const auto& fvp : index) {
        const auto& f = fvp.first;
        switch (f) {
        case Value::Field::Id:
            index[f] = {f, v.id};
            break;
        case Value::Field::ValueType:
            index[f] = {f, v.type};
            break;
        case Value::Field::OwnerPk:
            // Unsigned values index an empty hash as owner.
            index[f] = {f, v.owner ? v.owner->getId() : InfoHash() };
            break;
        case Value::Field::SeqNum:
            index[f] = {f, v.seq};
            break;
        case Value::Field::UserType:
            index[f] = {f, Blob {v.user_type.begin(), v.user_type.end()}};
            break;
        default:
            break;
        }
    }
}
+
/**
 * True when every field indexed here is also indexed in `other`.
 * NOTE(review): only the field *keys* are compared; the associated
 * FieldValue contents are ignored — confirm this is intended.
 */
bool FieldValueIndex::containedIn(const FieldValueIndex& other) const {
    // A larger index can never be contained in a smaller one.
    if (index.size() > other.index.size())
        return false;
    for (const auto& field : index) {
        auto other_field = other.index.find(field.first);
        if (other_field == other.index.end())
            return false;
    }
    return true;
}
+
/** Prints an index as "Index[field:value,...]" for logging. */
std::ostream& operator<<(std::ostream& os, const FieldValueIndex& fvi) {
    os << "Index[";
    for (auto v = fvi.index.begin(); v != fvi.index.end(); ++v) {
        switch (v->first) {
        case Value::Field::Id: {
            // Ids are printed in hex; flags are restored afterwards.
            auto flags(os.flags());
            os << "Id:" << std::hex << v->second.getInt();
            os.flags(flags);
            break;
        }
        case Value::Field::ValueType:
            os << "ValueType:" << v->second.getInt();
            break;
        case Value::Field::OwnerPk:
            os << "Owner:" << v->second.getHash().toString();
            break;
        case Value::Field::SeqNum:
            os << "Seq:" << v->second.getInt();
            break;
        case Value::Field::UserType: {
            auto ut = v->second.getBlob();
            os << "UserType:" << std::string(ut.begin(), ut.end());
            break;
        }
        default:
            break;
        }
        // Comma between entries, none after the last.
        os << (std::next(v) != fvi.index.end() ? "," : "");
    }
    return os << "]";
}
+
/**
 * Unpacks the given fields from a msgpack array, reading one array element
 * per field starting at `offset`, in the iteration order of `fields`.
 * NOTE(review): assumes `o` is an ARRAY with at least offset + fields.size()
 * elements — no bounds check is performed; confirm callers guarantee it.
 * @throws msgpack::type_error on an unsupported field.
 */
void
FieldValueIndex::msgpack_unpack_fields(const std::set<Value::Field>& fields, const msgpack::object& o, unsigned offset)
{
    index.clear();

    unsigned j = 0;
    for (const auto& field : fields) {
        auto& field_value = o.via.array.ptr[offset+(j++)];
        switch (field) {
        case Value::Field::Id:
        case Value::Field::ValueType:
        case Value::Field::SeqNum:
            index[field] = FieldValue(field, field_value.as<uint64_t>());
            break;
        case Value::Field::OwnerPk:
            index[field] = FieldValue(field, field_value.as<InfoHash>());
            break;
        case Value::Field::UserType:
            index[field] = FieldValue(field, field_value.as<Blob>());
            break;
        default:
            throw msgpack::type_error();
        }
    }
}
+
/**
 * Strips leading and trailing whitespace from `str`, in place.
 * A string that is entirely whitespace becomes empty.
 * Generalized: previously only the space character was trimmed; tabs and
 * line endings (which std::getline can leave in WHERE-clause tokens) are
 * now removed too.
 */
void trim_str(std::string& str) {
    static const char whitespace[] = " \t\r\n";
    const auto first = std::min(str.size(), str.find_first_not_of(whitespace));
    const auto last = std::min(str.size(), str.find_last_not_of(whitespace));
    str = str.substr(first, last - first + 1);
}
+
+Select::Select(const std::string& q_str) {
+ std::istringstream q_iss {q_str};
+ std::string token {};
+ q_iss >> token;
+
+ if (token == "SELECT" or token == "select") {
+ q_iss >> token;
+ std::istringstream fields {token};
+
+ while (std::getline(fields, token, ',')) {
+ trim_str(token);
+ if (token == "id")
+ field(Value::Field::Id);
+ else if (token == "value_type")
+ field(Value::Field::ValueType);
+ else if (token == "owner_pk")
+ field(Value::Field::OwnerPk);
+ if (token == "seq")
+ field(Value::Field::SeqNum);
+ else if (token == "user_type")
+ field(Value::Field::UserType);
+ }
+ }
+}
+
/**
 * Parses a "WHERE f1=v1, f2=v2, ..." clause into field restrictions.
 * Values that parse as integers are used numerically; double-quoted values
 * have the quotes stripped. An unknown field name throws
 * std::invalid_argument prefixed with QUERY_PARSE_ERROR.
 */
Where::Where(const std::string& q_str) {
    std::istringstream q_iss {q_str};
    std::string token {};
    q_iss >> token;
    if (token == "WHERE" or token == "where") {
        // The rest of the line is a comma-separated list of "field=value".
        std::getline(q_iss, token);
        std::istringstream restrictions {token};
        while (std::getline(restrictions, token, ',')) {
            trim_str(token);
            std::istringstream eq_ss {token};
            std::string field_str, value_str;
            std::getline(eq_ss, field_str, '=');
            trim_str(field_str);
            std::getline(eq_ss, value_str, '=');
            trim_str(value_str);

            // Restrictions with an empty value are silently skipped.
            if (not value_str.empty()) {
                uint64_t v = 0;
                std::string s {};
                std::istringstream convert {value_str};
                convert >> v;
                // Non-numeric, double-quoted values get the quotes
                // stripped; everything else is used verbatim as a string.
                if (not convert
                        and value_str.size() > 1
                        and value_str[0] == '"'
                        and value_str[value_str.size()-1] == '"')
                    s = value_str.substr(1, value_str.size()-2);
                else
                    s = value_str;
                // Numeric fields use v (0 when parsing failed);
                // string fields use s.
                if (field_str == "id")
                    id(v);
                else if (field_str == "value_type")
                    valueType(v);
                else if (field_str == "owner_pk")
                    owner(InfoHash(s));
                else if (field_str == "seq")
                    seq(v);
                else if (field_str == "user_type")
                    userType(s);
                else
                    throw std::invalid_argument(Query::QUERY_PARSE_ERROR + " (WHERE) wrong token near: " + field_str);
            }
        }
    }
}
+
+void
+Query::msgpack_unpack(const msgpack::object& o)
+{
+ if (o.type != msgpack::type::MAP)
+ throw msgpack::type_error();
+
+ auto rfilters = findMapValue(o, "w"); /* unpacking filters */
+ if (rfilters)
+ where.msgpack_unpack(*rfilters);
+ else
+ throw msgpack::type_error();
+
+ auto rfield_selector = findMapValue(o, "s"); /* unpacking field selectors */
+ if (rfield_selector)
+ select.msgpack_unpack(*rfield_selector);
+ else
+ throw msgpack::type_error();
+}
+
/**
 * Checks that every element of `fds` also appears in `qfds`
 * (an empty `fds` is a subset of anything). T must be equality-comparable.
 * Improved: arguments are taken by const reference (they used to be copied)
 * and the hand-written loops are replaced with standard algorithms.
 */
template <typename T>
bool subset(const std::vector<T>& fds, const std::vector<T>& qfds)
{
    return std::all_of(fds.begin(), fds.end(), [&qfds](const T& fd) {
        return std::find(qfds.begin(), qfds.end(), fd) != qfds.end();
    });
}
+
+bool Select::isSatisfiedBy(const Select& os) const {
+ /* empty, means all values are selected. */
+ if (fieldSelection_.empty() and not os.fieldSelection_.empty())
+ return false;
+ else
+ return subset(fieldSelection_, os.fieldSelection_);
+}
+
/**
 * True when every filter of `ow` also appears among this Where's filters,
 * i.e. this Where is at least as restrictive as `ow`.
 */
bool Where::isSatisfiedBy(const Where& ow) const {
    return subset(ow.filters_, filters_);
}
+
/**
 * True when the results of query `q` would satisfy this query
 * (both its WHERE and SELECT parts). A query with `none` set is
 * trivially satisfied.
 */
bool Query::isSatisfiedBy(const Query& q) const {
    return none or (where.isSatisfiedBy(q.where) and select.isSatisfiedBy(q.select));
}
+
/** Prints a Select as a "SELECT ..." clause ("*" when no field is selected). */
std::ostream& operator<<(std::ostream& s, const dht::Select& select) {
    s << "SELECT " << (select.fieldSelection_.empty() ? "*" : "");
    for (auto fs = select.fieldSelection_.begin() ; fs != select.fieldSelection_.end() ; ++fs) {
        switch (*fs) {
        case Value::Field::Id:
            s << "id";
            break;
        case Value::Field::ValueType:
            s << "value_type";
            break;
        case Value::Field::UserType:
            s << "user_type";
            break;
        case Value::Field::OwnerPk:
            s << "owner_public_key";
            break;
        case Value::Field::SeqNum:
            s << "seq";
            break;
        default:
            break;
        }
        // Comma between fields, none after the last.
        s << (std::next(fs) != select.fieldSelection_.end() ? "," : "");
    }
    return s;
}
+
/** Prints a Where as a "WHERE f=v,..." clause; prints nothing when empty. */
std::ostream& operator<<(std::ostream& s, const dht::Where& where) {
    if (not where.filters_.empty()) {
        s << "WHERE ";
        for (auto f = where.filters_.begin() ; f != where.filters_.end() ; ++f) {
            switch (f->getField()) {
            case Value::Field::Id:
                s << "id=" << f->getInt();
                break;
            case Value::Field::ValueType:
                s << "value_type=" << f->getInt();
                break;
            case Value::Field::OwnerPk:
                s << "owner_pk_hash=" << f->getHash().toString();
                break;
            case Value::Field::SeqNum:
                s << "seq=" << f->getInt();
                break;
            case Value::Field::UserType: {
                auto b = f->getBlob();
                s << "user_type=" << std::string {b.begin(), b.end()};
                break;
            }
            default:
                break;
            }
            // Comma between filters, none after the last.
            s << (std::next(f) != where.filters_.end() ? "," : "");
        }
    }
    return s;
}
+
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ * Author(s) : Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include "value.h"
+
+namespace dht {
+
+using ValueStateCallback = std::function<void(const std::vector<Sp<Value>>&, bool)>;
+using CallbackQueue = std::list<std::function<void()>>;
+
+class ValueCache {
+public:
+ ValueCache(ValueStateCallback&& cb) : callback(std::forward<ValueStateCallback>(cb)) {}
+ ValueCache(ValueCache&& o) : values(std::move(o.values)), callback(std::move(o.callback)) {
+ o.callback = {};
+ }
+
+ ~ValueCache() {
+ auto q = clear();
+ for (auto& cb: q)
+ cb();
+ }
+
+ CallbackQueue clear() {
+ std::vector<Sp<Value>> expired_values;
+ expired_values.reserve(values.size());
+ for (const auto& v : values)
+ expired_values.emplace_back(std::move(v.second.data));
+ values.clear();
+ CallbackQueue ret;
+ if (not expired_values.empty() and callback) {
+ auto cb = callback;
+ ret.emplace_back([expired_values, cb]{
+ cb(expired_values, true);
+ });
+ }
+ return ret;
+ }
+
+ time_point expireValues(const time_point& now) {
+ time_point ret = time_point::max();
+ auto cbs = expireValues(now, ret);
+ while (not cbs.empty()) {
+ cbs.front()();
+ cbs.pop_front();
+ }
+ return ret;
+ }
+
+ CallbackQueue expireValues(const time_point& now, time_point& next) {
+ std::vector<Sp<Value>> expired_values;
+ for (auto it = values.begin(); it != values.end();) {
+ if (it->second.expiration <= now) {
+ expired_values.emplace_back(std::move(it->second.data));
+ it = values.erase(it);
+ } else {
+ next = std::min(next, it->second.expiration);
+ ++it;
+ }
+ }
+ while (values.size() > MAX_VALUES) {
+ // too many values, remove oldest values
+ time_point oldest_creation = time_point::max();
+ auto oldest_value = values.end();
+ for (auto it = values.begin(); it != values.end(); ++it)
+ if (it->second.created < oldest_creation) {
+ oldest_value = it;
+ oldest_creation = it->second.created;
+ }
+ if (oldest_value != values.end()) {
+ expired_values.emplace_back(std::move(oldest_value->second.data));
+ values.erase(oldest_value);
+ }
+ }
+ CallbackQueue ret;
+ if (not expired_values.empty() and callback) {
+ auto cb = callback;
+ ret.emplace_back([cb, expired_values]{
+ if (cb) cb(expired_values, true);
+ });
+ }
+ return ret;
+ }
+
+    // Apply a batch of remote observations to the cache:
+    //  - `values`: values to add (or refresh if already present),
+    //  - `refreshed_values`: ids whose lifetime is extended,
+    //  - `expired_values`: ids to remove immediately.
+    // All resulting user callbacks are run before returning.
+    // Returns the next scheduled expiration time (time_point::max() if none).
+    time_point onValues
+        (const std::vector<Sp<Value>>& values,
+        const std::vector<Value::Id>& refreshed_values,
+        const std::vector<Value::Id>& expired_values,
+        const TypeStore& types, const time_point& now)
+    {
+        CallbackQueue cbs;
+        time_point ret = time_point::max();
+        if (not values.empty())
+            cbs.splice(cbs.end(), addValues(values, types, now));
+        for (const auto& vid : refreshed_values)
+            refreshValue(vid, types, now);
+        for (const auto& vid : expired_values)
+            cbs.splice(cbs.end(), expireValue(vid));
+        cbs.splice(cbs.end(), expireValues(now, ret));
+        while (not cbs.empty()) {
+            cbs.front()();
+            cbs.pop_front();
+        }
+        return ret;
+    }
+
+private:
+ // prevent copy
+ ValueCache(const ValueCache&) = delete;
+ ValueCache& operator=(const ValueCache&) = delete;
+ ValueCache& operator=(ValueCache&&) = delete;
+
+ /* The maximum number of values we store in the cache. */
+ static constexpr unsigned MAX_VALUES {4096};
+
+    // Per-value bookkeeping: the value itself plus the timestamps used for
+    // eviction (oldest-created is dropped first) and expiration.
+    struct CacheValueStorage {
+        Sp<Value> data {};
+        time_point created {};
+        time_point expiration {};
+
+        CacheValueStorage() {}
+        CacheValueStorage(const Sp<Value>& v, time_point t, time_point e)
+         : data(v), created(t), expiration(e) {}
+    };
+
+ std::map<Value::Id, CacheValueStorage> values;
+ ValueStateCallback callback;
+
+    // Insert `new_values` into the cache. Values already present (same id)
+    // are refreshed (created/expiration reset) instead of re-added.
+    // Returns a deferred callback announcing only the genuinely new values
+    // (expired=false); empty queue if nothing new.
+    CallbackQueue addValues(const std::vector<Sp<Value>>& new_values, const TypeStore& types, const time_point& now) {
+        std::vector<Sp<Value>> nvals;
+        for (const auto& value : new_values) {
+            auto v = values.find(value->id);
+            if (v == values.end()) {
+                // new value
+                nvals.emplace_back(value);
+                values.emplace(value->id, CacheValueStorage(value, now, now + types.getType(value->type).expiration));
+            } else {
+                // refreshed value
+                v->second.created = now;
+                v->second.expiration = now + types.getType(v->second.data->type).expiration;
+            }
+        }
+        // Copy the callback so the closure is self-contained.
+        auto cb = callback;
+        CallbackQueue ret;
+        if (not nvals.empty())
+            ret.emplace_back([cb, nvals]{
+                if (cb) cb(nvals, false);
+            });
+        return ret;
+    }
+    // Remove the value with id `vid` (no-op if absent). Returns a deferred
+    // callback announcing the removed value with expired=true.
+    CallbackQueue expireValue(Value::Id vid) {
+        auto v = values.find(vid);
+        if (v == values.end())
+            return {};
+        const std::vector<Sp<Value>> val {std::move(v->second.data)};
+        values.erase(v);
+        auto cb = callback;
+        CallbackQueue ret;
+        ret.emplace_back([cb, val]{
+            if (cb) cb(val, true);
+        });
+        return ret;
+    }
+    // Extend the lifetime of the value with id `vid`: reset its creation
+    // time to `now` and recompute its expiration from its type. No-op if
+    // the value is not cached.
+    void refreshValue(Value::Id vid, const TypeStore& types, const time_point& now) {
+        auto v = values.find(vid);
+        if (v == values.end())
+            return;
+        v->second.created = now;
+        v->second.expiration = now + types.getType(v->second.data->type).expiration;
+    }
+};
+
+}
--- /dev/null
+# CppUnit-based unit test suite; built only when configured with --enable-tests.
+if ENABLE_TESTS
+bin_PROGRAMS = opendht_unit_tests
+
+AM_CPPFLAGS = -I../include
+
+nobase_include_HEADERS = infohashtester.h cryptotester.h dhtrunnertester.h dhtproxytester.h
+opendht_unit_tests_SOURCES = tests_runner.cpp cryptotester.cpp infohashtester.cpp dhtrunnertester.cpp dhtproxytester.cpp
+# NOTE(review): by automake convention libraries (-lopendht -lcppunit,
+# @GnuTLS_LIBS@) belong in opendht_unit_tests_LDADD rather than _LDFLAGS, so
+# they appear after the object files on the link line — confirm intended.
+opendht_unit_tests_LDFLAGS = -lopendht -lcppunit -L@top_builddir@/src/.libs @GnuTLS_LIBS@
+endif
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "cryptotester.h"
+
+#include "opendht/crypto.h"
+
+namespace test {
+CPPUNIT_TEST_SUITE_REGISTRATION(CryptoTester);
+
+void
+CryptoTester::setUp() {
+
+}
+
+// Exercise a fresh RSA key pair: sign/verify a small blob, then
+// encrypt with the public key and decrypt with the private key.
+void
+CryptoTester::testSignatureEncryption() {
+    auto key = dht::crypto::PrivateKey::generate();
+    auto public_key = key.getPublicKey();
+
+    std::vector<uint8_t> data {5, 10};
+    std::vector<uint8_t> signature = key.sign(data);
+
+    // check signature
+    CPPUNIT_ASSERT(public_key.checkSignature(data, signature));
+
+    // encrypt data
+    std::vector<uint8_t> encrypted = public_key.encrypt(data);
+    std::vector<uint8_t> decrypted = key.decrypt(encrypted);
+    CPPUNIT_ASSERT(data == decrypted);
+}
+
+// Build a CA -> account -> device certificate chain, verify it against a
+// trust list, check that an unrelated chain is rejected, then revoke one
+// device and check only that device fails verification afterwards.
+void
+CryptoTester::testCertificateRevocation()
+{
+    auto ca1 = dht::crypto::generateIdentity("ca1");
+    auto account1 = dht::crypto::generateIdentity("acc1", ca1, 4096, true);
+    auto device11 = dht::crypto::generateIdentity("dev11", account1);
+    auto device12 = dht::crypto::generateIdentity("dev12", account1);
+
+
+    dht::crypto::TrustList list;
+    list.add(*ca1.second);
+    auto v = list.verify(*account1.second);
+    CPPUNIT_ASSERT_MESSAGE(v.toString(), v);
+
+    list.add(*account1.second);
+    v = list.verify(*device11.second);
+    CPPUNIT_ASSERT_MESSAGE(v.toString(), v);
+    v = list.verify(*device12.second);
+    CPPUNIT_ASSERT_MESSAGE(v.toString(), v);
+
+    // A second, independent chain must not verify against the first list.
+    auto ca2 = dht::crypto::generateIdentity("ca2");
+    auto account2 = dht::crypto::generateIdentity("acc2", ca2, 4096, true);
+    auto device2 = dht::crypto::generateIdentity("dev2", account2);
+
+    v = list.verify(*device2.second);
+    CPPUNIT_ASSERT_MESSAGE(v.toString(), !v);
+
+    // Revoke device11 with the account key; a trust list built from the
+    // updated account certificate must reject device11 but keep device12.
+    account1.second->revoke(*account1.first, *device11.second);
+    dht::crypto::TrustList list2;
+    list2.add(*account1.second);
+
+    v = list2.verify(*device11.second);
+    CPPUNIT_ASSERT_MESSAGE(v.toString(), !v);
+    v = list2.verify(*device12.second);
+    CPPUNIT_ASSERT_MESSAGE(v.toString(), v);
+}
+
+void
+CryptoTester::tearDown() {
+
+}
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+// cppunit
+#include <cppunit/TestFixture.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+namespace test {
+
+// Unit tests for the dht::crypto layer (keys, signatures, certificates).
+class CryptoTester : public CppUnit::TestFixture {
+    CPPUNIT_TEST_SUITE(CryptoTester);
+    CPPUNIT_TEST(testSignatureEncryption);
+    CPPUNIT_TEST(testCertificateRevocation);
+    CPPUNIT_TEST_SUITE_END();
+
+ public:
+    /**
+     * Method automatically called before each test by CppUnit
+     */
+    void setUp();
+    /**
+     * Method automatically called after each test by CppUnit
+     */
+    void tearDown();
+    /**
+     * Test data signature, encryption and decryption
+     */
+    void testSignatureEncryption();
+    /**
+     * Test certificate generation, validation and revocation
+     */
+    void testCertificateRevocation();
+};
+
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "dhtproxytester.h"
+
+// std
+#include <iostream>
+#include <string>
+
+#include <chrono>
+#include <condition_variable>
+
+
+namespace test {
+CPPUNIT_TEST_SUITE_REGISTRATION(DhtProxyTester);
+
+// Topology: nodePeer is the bootstrap node; nodeProxy runs a DhtProxyServer
+// on port 8080; nodeClient is configured to reach the DHT through that proxy.
+void
+DhtProxyTester::setUp() {
+    nodePeer.run(42222, {}, true);
+    nodeProxy = std::make_shared<dht::DhtRunner>();
+    nodeClient = std::make_shared<dht::DhtRunner>();
+
+    nodeProxy->run(42232, {}, true);
+    nodeProxy->bootstrap(nodePeer.getBound());
+    server = std::unique_ptr<dht::DhtProxyServer>(new dht::DhtProxyServer(nodeProxy, 8080));
+
+    nodeClient->run(42242, {}, true);
+    nodeClient->bootstrap(nodePeer.getBound());
+    nodeClient->setProxyServer("127.0.0.1:8080");
+    nodeClient->enableProxy(true);
+}
+
+// Shut everything down: peer and client first, then the proxy server
+// before joining the node it wraps.
+void
+DhtProxyTester::tearDown() {
+    nodePeer.join();
+    nodeClient->join();
+    server->stop();
+    server = nullptr;
+    nodeProxy->join();
+}
+
+// Put a value from the plain peer, then fetch it through the proxy client
+// and check the payload survives the round trip.
+void
+DhtProxyTester::testGetPut() {
+    bool done = false;
+    std::condition_variable cv;
+    std::mutex cv_m;
+
+    auto key = dht::InfoHash::get("GLaDOs");
+    dht::Value val {"Hey! It's been a long time. How have you been?"};
+    // Keep a copy of the payload: `val` is moved into the put below.
+    auto val_data = val.data;
+
+    nodePeer.put(key, std::move(val), [&](bool) {
+        done = true;
+        cv.notify_all();
+    });
+    // Wait (up to 10 s) for the put to be acknowledged before getting.
+    std::unique_lock<std::mutex> lk(cv_m);
+    cv.wait_for(lk, std::chrono::seconds(10), [&]{ return done; });
+
+    auto vals = nodeClient->get(key).get();
+    CPPUNIT_ASSERT(not vals.empty());
+    CPPUNIT_ASSERT(vals.front()->data == val_data);
+}
+
+
+// Listen through the proxy client: the listener must first deliver the
+// value already stored before the listen started, then deliver values
+// put afterwards.
+void
+DhtProxyTester::testListen() {
+    bool done = false;
+    std::condition_variable cv;
+    std::mutex cv_m;
+    std::unique_lock<std::mutex> lk(cv_m);
+    auto key = dht::InfoHash::get("GLaDOs");
+
+    // If a peer sends a value, the listen operation from the client
+    // should retrieve this value
+    dht::Value firstVal {"Hey! It's been a long time. How have you been?"};
+    auto firstVal_data = firstVal.data;
+    nodePeer.put(key, std::move(firstVal), [&](bool) {
+        done = true;
+        cv.notify_all();
+    });
+    cv.wait_for(lk, std::chrono::seconds(10), [&]{ return done; });
+    done = false;
+
+    auto values = std::vector<dht::Blob>();
+    nodeClient->listen(key, [&](const std::vector<std::shared_ptr<dht::Value>>& v, bool) {
+        for (const auto& value : v)
+            values.emplace_back(value->data);
+        done = true;
+        cv.notify_all();
+        return true;
+    });
+
+    cv.wait_for(lk, std::chrono::seconds(10), [&]{ return done; });
+    done = false;
+    // Here values should contain 1 value (the one stored before listening)
+    CPPUNIT_ASSERT_EQUAL(static_cast<int>(values.size()), 1);
+    CPPUNIT_ASSERT(values.front() == firstVal_data);
+
+    // And the listen should retrieve future values
+    // All values
+    dht::Value secondVal {"You're a monster"};
+    auto secondVal_data = secondVal.data;
+    nodePeer.put(key, std::move(secondVal));
+    cv.wait_for(lk, std::chrono::seconds(10), [&]{ return done; });
+    // Here values should contain 2 values
+    CPPUNIT_ASSERT_EQUAL(static_cast<int>(values.size()), 2);
+    CPPUNIT_ASSERT(values.back() == secondVal_data);
+}
+
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+// cppunit
+#include <cppunit/TestFixture.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+#include <opendht/dhtrunner.h>
+#include <opendht/dht_proxy_server.h>
+
+namespace test {
+
+// Integration tests for the DHT REST proxy: a plain peer, a proxy server
+// node, and a client node routed through the proxy.
+class DhtProxyTester : public CppUnit::TestFixture {
+    CPPUNIT_TEST_SUITE(DhtProxyTester);
+    CPPUNIT_TEST(testGetPut);
+    CPPUNIT_TEST(testListen);
+    CPPUNIT_TEST_SUITE_END();
+
+ public:
+    /**
+     * Method automatically called before each test by CppUnit
+     * Init nodes
+     */
+    void setUp();
+    /**
+     * Method automatically called after each test by CppUnit
+     */
+    void tearDown();
+    /**
+     * Test get and put methods
+     */
+    void testGetPut();
+    /**
+     * Test listen
+     */
+    void testListen();
+
+ private:
+    // Plain DHT node used as bootstrap and as the "remote" peer.
+    dht::DhtRunner nodePeer {};
+
+    std::shared_ptr<dht::DhtRunner> nodeClient;
+    std::shared_ptr<dht::DhtRunner> nodeProxy;
+    std::unique_ptr<dht::DhtProxyServer> server;
+};
+
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "dhtrunnertester.h"
+
+// std
+#include <iostream>
+#include <string>
+
+namespace test {
+CPPUNIT_TEST_SUITE_REGISTRATION(DhtRunnerTester);
+
+// Start two nodes on fixed ports and bootstrap node2 off node1.
+void
+DhtRunnerTester::setUp() {
+    node1.run(42222, {}, true);
+    node2.run(42232, {}, true);
+    node2.bootstrap(node1.getBound());
+}
+
+// Stop both nodes after each test.
+void
+DhtRunnerTester::tearDown() {
+    node1.join();
+    node2.join();
+}
+
+// Check the nodes bound to the ports requested in setUp().
+void
+DhtRunnerTester::testConstructors() {
+    CPPUNIT_ASSERT(node1.getBoundPort() == 42222);
+    CPPUNIT_ASSERT(node2.getBoundPort() == 42232);
+}
+
+// Put a value from node2 and fetch it back from node1.
+void
+DhtRunnerTester::testGetPut() {
+    auto key = dht::InfoHash::get("123");
+    dht::Value val {"hey"};
+    // Copy the payload before `val` is moved into the put.
+    auto val_data = val.data;
+    std::promise<bool> p;
+    node2.put(key, std::move(val), [&](bool ok){
+        p.set_value(ok);
+    });
+    CPPUNIT_ASSERT(p.get_future().get());
+    auto vals = node1.get(key).get();
+    CPPUNIT_ASSERT(not vals.empty());
+    CPPUNIT_ASSERT(vals.front()->data == val_data);
+}
+
+// Three listeners: `a` keeps listening (returns true), `b` cancels itself
+// after the first delivery (returns false), `c` never receives anything.
+// After N puts on each of a and b, the expected count is N (from a) + 1
+// (the single delivery to b before it cancelled).
+void
+DhtRunnerTester::testListen() {
+    std::atomic_uint valueCount(0);
+    std::atomic_uint putCount(0);
+
+    auto a = dht::InfoHash::get("234");
+    auto b = dht::InfoHash::get("2345");
+    auto c = dht::InfoHash::get("23456");
+    constexpr unsigned N = 32;
+
+    auto ftokena = node1.listen(a, [&](const std::shared_ptr<dht::Value>&){
+        valueCount++;
+        return true;
+    });
+
+    auto ftokenb = node1.listen(b, [&](const std::shared_ptr<dht::Value>&){
+        valueCount++;
+        return false;
+    });
+
+    auto ftokenc = node1.listen(c, [&](const std::shared_ptr<dht::Value>&){
+        valueCount++;
+        return true;
+    });
+
+    for (unsigned i=0; i<N; i++) {
+        node2.put(a, dht::Value("v1"), [&](bool ok) { if (ok) putCount++; });
+        node2.put(b, dht::Value("v2"), [&](bool ok) { if (ok) putCount++; });
+    }
+
+    // NOTE(review): fixed 100 ms wait for all puts/listens to settle —
+    // presumably sufficient on localhost, but timing-sensitive; confirm.
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    auto tokena = ftokena.get();
+    auto tokenb = ftokenb.get();
+    auto tokenc = ftokenc.get();
+
+    CPPUNIT_ASSERT(tokena);
+    CPPUNIT_ASSERT(tokenb);
+    CPPUNIT_ASSERT(tokenc);
+    CPPUNIT_ASSERT_EQUAL(N * 2u, putCount.load());
+    CPPUNIT_ASSERT_EQUAL(N + 1u, valueCount.load());
+}
+
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+// cppunit
+#include <cppunit/TestFixture.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+#include <opendht/dhtrunner.h>
+
+namespace test {
+
+// Tests for DhtRunner using two local nodes bootstrapped to each other.
+class DhtRunnerTester : public CppUnit::TestFixture {
+    CPPUNIT_TEST_SUITE(DhtRunnerTester);
+    CPPUNIT_TEST(testConstructors);
+    CPPUNIT_TEST(testGetPut);
+    CPPUNIT_TEST(testListen);
+    CPPUNIT_TEST_SUITE_END();
+
+    dht::DhtRunner node1 {};
+    dht::DhtRunner node2 {};
+ public:
+    /**
+     * Method automatically called before each test by CppUnit
+     */
+    void setUp();
+    /**
+     * Method automatically called after each test by CppUnit
+     */
+    void tearDown();
+    /**
+     * Test the different behaviors of constructors
+     */
+    void testConstructors();
+    /**
+     * Test get and put methods
+     */
+    void testGetPut();
+    /**
+     * Test listen method
+     */
+    void testListen();
+};
+
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "infohashtester.h"
+
+// std
+#include <iostream>
+#include <string>
+
+// opendht
+#include "opendht/infohash.h"
+
+namespace test {
+CPPUNIT_TEST_SUITE_REGISTRATION(InfoHashTester);
+
+void
+InfoHashTester::setUp() {
+
+}
+
+// InfoHash construction: default (null), from raw bytes of various
+// lengths, and from a hex string.
+void
+InfoHashTester::testConstructors() {
+    // Default constructor creates a null infohash
+    auto nullHash = dht::InfoHash();
+    CPPUNIT_ASSERT(nullHash.size() == 20);
+    CPPUNIT_ASSERT(!nullHash);
+    // Build from a uint8_t buffer. If the length is too short, the result
+    // should be a null infohash.
+    uint8_t to_short[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8};
+    auto infohash = dht::InfoHash(to_short, 8);
+    CPPUNIT_ASSERT(infohash.size() == 20);
+    CPPUNIT_ASSERT_EQUAL(infohash.toString(),
+                        std::string("0000000000000000000000000000000000000000"));
+    // Build from a uint8_t buffer. If the length is sufficient, the hash
+    // should contain exactly those bytes.
+    uint8_t enough[] = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa,
+                        0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa};
+    infohash = dht::InfoHash(enough, 20);
+    CPPUNIT_ASSERT(infohash.size() == 20);
+    const auto* data = infohash.data();
+    for (auto i = 0; i < 20; ++i) {
+        CPPUNIT_ASSERT_EQUAL(enough[i], data[i]);
+    }
+    // If too long, the input should be truncated to 20 bytes
+    uint8_t tooLong[] = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa,
+                        0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb0};
+    infohash = dht::InfoHash(tooLong, 21);
+    CPPUNIT_ASSERT(infohash.size() == 20);
+    const auto* data2 = infohash.data();
+    for (auto i = 0; i < 20; ++i) {
+        CPPUNIT_ASSERT_EQUAL(enough[i], data2[i]);
+    }
+    // Build from string
+    auto infohashFromStr = dht::InfoHash("0102030405060708090A0102030405060708090A");
+    CPPUNIT_ASSERT(infohashFromStr.size() == 20);
+    const auto* dataStr = infohashFromStr.data();
+    for (auto i = 0; i < 20; ++i) {
+        CPPUNIT_ASSERT_EQUAL((int)dataStr[i], (int)data[i]);
+    }
+}
+
+// Comparison operators (==, !=, <) and boolean conversion.
+void
+InfoHashTester::testComperators() {
+    auto nullHash = dht::InfoHash();
+    auto minHash = dht::InfoHash("0000000000000000000000000000000000111110");
+    auto maxHash = dht::InfoHash("0111110000000000000000000000000000000000");
+    // operator ==
+    CPPUNIT_ASSERT_EQUAL(minHash, minHash);
+    CPPUNIT_ASSERT_EQUAL(minHash, dht::InfoHash("0000000000000000000000000000000000111110"));
+    CPPUNIT_ASSERT(!(minHash == maxHash));
+    // operator !=
+    CPPUNIT_ASSERT(!(minHash != minHash));
+    CPPUNIT_ASSERT(!(minHash != dht::InfoHash("0000000000000000000000000000000000111110")));
+    CPPUNIT_ASSERT(minHash != maxHash);
+    // operator<
+    CPPUNIT_ASSERT(nullHash < minHash);
+    CPPUNIT_ASSERT(nullHash < maxHash);
+    CPPUNIT_ASSERT(minHash < maxHash);
+    CPPUNIT_ASSERT(!(minHash < nullHash));
+    CPPUNIT_ASSERT(!(maxHash < nullHash));
+    CPPUNIT_ASSERT(!(maxHash < minHash));
+    CPPUNIT_ASSERT(!(minHash < minHash));
+    // bool()
+    CPPUNIT_ASSERT(maxHash);
+    CPPUNIT_ASSERT(!nullHash);
+
+}
+
+// lowbit(): index of the lowest set bit (-1 for a null hash).
+void
+InfoHashTester::testLowBit() {
+    auto nullHash = dht::InfoHash();
+    auto minHash = dht::InfoHash("0000000000000000000000000000000000000010");
+    auto maxHash = dht::InfoHash("0100000000000000000000000000000000000000");
+    CPPUNIT_ASSERT_EQUAL(nullHash.lowbit(), -1);
+    CPPUNIT_ASSERT_EQUAL(minHash.lowbit(), 155);
+    CPPUNIT_ASSERT_EQUAL(maxHash.lowbit(), 7);
+}
+
+// commonBits(): number of leading bits two hashes share (160 for equal).
+void
+InfoHashTester::testCommonBits() {
+    auto nullHash = dht::InfoHash();
+    auto minHash = dht::InfoHash("0000000000000000000000000000000000000010");
+    auto maxHash = dht::InfoHash("0100000000000000000000000000000000000000");
+    CPPUNIT_ASSERT_EQUAL(dht::InfoHash::commonBits(nullHash, nullHash), (unsigned)160);
+    CPPUNIT_ASSERT_EQUAL(dht::InfoHash::commonBits(nullHash, minHash), (unsigned)155);
+    CPPUNIT_ASSERT_EQUAL(dht::InfoHash::commonBits(nullHash, maxHash), (unsigned)7);
+    CPPUNIT_ASSERT_EQUAL(dht::InfoHash::commonBits(minHash, maxHash), (unsigned)7);
+}
+
+// xorCmp(a, b): -1 if a is XOR-closer to this hash than b, 1 otherwise.
+void
+InfoHashTester::testXorCmp() {
+    auto nullHash = dht::InfoHash();
+    auto minHash = dht::InfoHash("0000000000000000000000000000000000000010");
+    auto maxHash = dht::InfoHash("0100000000000000000000000000000000000000");
+    CPPUNIT_ASSERT_EQUAL(minHash.xorCmp(nullHash, maxHash), -1);
+    CPPUNIT_ASSERT_EQUAL(minHash.xorCmp(maxHash, nullHash), 1);
+    CPPUNIT_ASSERT_EQUAL(minHash.xorCmp(minHash, maxHash), -1);
+    CPPUNIT_ASSERT_EQUAL(minHash.xorCmp(maxHash, minHash), 1);
+    CPPUNIT_ASSERT_EQUAL(nullHash.xorCmp(minHash, maxHash), -1);
+    CPPUNIT_ASSERT_EQUAL(nullHash.xorCmp(maxHash, minHash), 1);
+    // Because hashes are circular in distance.
+    CPPUNIT_ASSERT_EQUAL(maxHash.xorCmp(nullHash, minHash), -1);
+    CPPUNIT_ASSERT_EQUAL(maxHash.xorCmp(minHash, nullHash), 1);
+}
+
+void
+InfoHashTester::tearDown() {
+
+}
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2018 Savoir-faire Linux Inc.
+ *
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+// cppunit
+#include <cppunit/TestFixture.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+namespace test {
+
+// Unit tests for dht::InfoHash construction, comparison and distance helpers.
+class InfoHashTester : public CppUnit::TestFixture {
+    CPPUNIT_TEST_SUITE(InfoHashTester);
+    CPPUNIT_TEST(testConstructors);
+    CPPUNIT_TEST(testComperators);
+    CPPUNIT_TEST(testLowBit);
+    CPPUNIT_TEST(testCommonBits);
+    CPPUNIT_TEST(testXorCmp);
+    CPPUNIT_TEST_SUITE_END();
+
+ public:
+    /**
+     * Method automatically called before each test by CppUnit
+     */
+    void setUp();
+    /**
+     * Method automatically called after each test by CppUnit
+     */
+    void tearDown();
+    /**
+     * Test the different behaviors of constructors
+     */
+    void testConstructors();
+    /**
+     * Test compare operators
+     */
+    void testComperators();
+    /**
+     * Test lowbit method
+     */
+    void testLowBit();
+    /**
+     * Test commonBits method
+     */
+    void testCommonBits();
+    /**
+     * Test xorCmp operators
+     */
+    void testXorCmp();
+
+};
+
+} // namespace test
--- /dev/null
+/*
+ * Copyright (C) 2017-2018 Savoir-faire Linux Inc.
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <cppunit/extensions/TestFactoryRegistry.h>
+#include <cppunit/ui/text/TestRunner.h>
+#include <cppunit/CompilerOutputter.h>
+#include <iostream>
+
+// Test driver: runs every CppUnit suite registered through
+// CPPUNIT_TEST_SUITE_REGISTRATION. Returns 0 on success, 1 on failure
+// or when no tests are registered.
+int main(int argc, char** argv) {
+    // Fix: '&registry' was corrupted into the '®istry' character by an
+    // HTML-entity mangling ("&reg;"); restore the reference declaration.
+    CppUnit::TestFactoryRegistry &registry = CppUnit::TestFactoryRegistry::getRegistry();
+    CppUnit::Test *suite = registry.makeTest();
+    if (suite->countTestCases() == 0) {
+        std::cout << "No test cases specified for suite" << std::endl;
+        return 1;
+    }
+    CppUnit::TextUi::TestRunner runner;
+    runner.addTest(suite);
+    // run() returns true when every test passed.
+    auto result = runner.run() ? 0 : 1;
+    return result;
+}
--- /dev/null
+
+# Command-line tools: node runner, network scanner and chat client.
+add_executable (dhtnode dhtnode.cpp tools_common.h)
+add_executable (dhtscanner dhtscanner.cpp tools_common.h)
+add_executable (dhtchat dhtchat.cpp tools_common.h)
+
+# NOTE(review): LINK_PUBLIC is the legacy keyword; modern CMake spells it
+# PUBLIC (or PRIVATE here, since nothing links against these executables).
+target_link_libraries (dhtnode LINK_PUBLIC readline)
+target_link_libraries (dhtscanner LINK_PUBLIC readline)
+target_link_libraries (dhtchat LINK_PUBLIC readline)
+
+# Link against the shared or static opendht target depending on how the
+# library itself was built.
+if (OPENDHT_SHARED)
+    target_link_libraries (dhtnode LINK_PUBLIC opendht)
+    target_link_libraries (dhtscanner LINK_PUBLIC opendht)
+    target_link_libraries (dhtchat LINK_PUBLIC opendht)
+else ()
+    target_link_libraries (dhtnode LINK_PUBLIC opendht-static)
+    target_link_libraries (dhtscanner LINK_PUBLIC opendht-static)
+    target_link_libraries (dhtchat LINK_PUBLIC opendht-static)
+endif ()
+
+install (TARGETS dhtnode dhtscanner dhtchat RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+if (OPENDHT_SYSTEMD)
+    # Ask pkg-config where systemd unit files live; the REGEX below strips
+    # the trailing newline (OUTPUT_STRIP_TRAILING_WHITESPACE would also work).
+    execute_process(COMMAND ${PKG_CONFIG_EXECUTABLE} systemd --variable=systemdsystemunitdir
+                    OUTPUT_VARIABLE SYSTEMD_UNIT_INSTALL_DIR)
+    string(REGEX REPLACE "[ \t\n]+" "" SYSTEMD_UNIT_INSTALL_DIR "${SYSTEMD_UNIT_INSTALL_DIR}")
+    set (systemdunitdir "${SYSTEMD_UNIT_INSTALL_DIR}")
+
+    configure_file (
+        systemd/dhtnode.service.in
+        systemd/dhtnode.service
+        @ONLY
+    )
+    install (FILES ${CMAKE_CURRENT_BINARY_DIR}/systemd/dhtnode.service DESTINATION ${systemdunitdir})
+    # NOTE(review): ${sysconfdir} is an autotools-style variable; confirm it
+    # is defined in this CMake project, otherwise DESTINATION is empty.
+    install (FILES systemd/dhtnode.conf DESTINATION ${sysconfdir})
+    if (OPENDHT_PYTHON)
+        configure_file (
+            systemd/dhtcluster.service.in
+            systemd/dhtcluster.service
+            @ONLY
+        )
+        install (FILES ${CMAKE_CURRENT_BINARY_DIR}/systemd/dhtcluster.service DESTINATION ${systemdunitdir})
+        install (FILES systemd/dhtcluster.conf DESTINATION ${sysconfdir})
+    endif()
+endif ()
--- /dev/null
+# Command-line tools (automake build).
+bin_PROGRAMS = dhtnode dhtchat dhtscanner
+noinst_HEADERS = tools_common.h
+
+AM_CPPFLAGS = -I../include @JsonCpp_CFLAGS@ @MsgPack_CFLAGS@
+
+# NOTE(review): by automake convention the libraries below belong in
+# *_LDADD rather than *_LDFLAGS — confirm link ordering is intended.
+dhtnode_SOURCES = dhtnode.cpp
+dhtnode_LDFLAGS = -lopendht -lreadline -L@top_builddir@/src/.libs @Argon2_LDFLAGS@ @GnuTLS_LIBS@
+
+dhtchat_SOURCES = dhtchat.cpp
+dhtchat_LDFLAGS = -lopendht -lreadline -L@top_builddir@/src/.libs @Argon2_LDFLAGS@ @GnuTLS_LIBS@
+
+dhtscanner_SOURCES = dhtscanner.cpp
+dhtscanner_LDFLAGS = -lopendht -lreadline -L@top_builddir@/src/.libs @Argon2_LDFLAGS@ @GnuTLS_LIBS@
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "tools_common.h"
+#include <opendht/rng.h>
+
+extern "C" {
+#include <gnutls/gnutls.h>
+}
+#include <ctime>
+
+using namespace dht;
+
+static std::mt19937_64 rd {dht::crypto::random_device{}()};
+static std::uniform_int_distribution<dht::Value::Id> rand_id;
+
+// Format a time_t as "YYYY-MM-DD HH:MM:SS" in the local time zone.
+const std::string printTime(const std::time_t& now) {
+    struct tm tstruct = *localtime(&now);
+    char buf[80];
+    strftime(buf, sizeof(buf), "%Y-%m-%d %X", &tstruct);
+    return buf;
+}
+
+// Print the node id, bound port and public key id of a running node.
+void print_node_info(const DhtRunner& dht, const dht_params&) {
+    std::cout << "OpenDht node " << dht.getNodeId() << " running on port " <<  dht.getBoundPort() << std::endl;
+    std::cout << "Public key ID " << dht.getId() << std::endl;
+}
+
+// Print dhtchat command-line usage.
+void print_usage() {
+    std::cout << "Usage: dhtchat [-n network_id] [-p local_port] [-b bootstrap_host[:port]]" << std::endl << std::endl;
+    std::cout << "dhtchat, a simple OpenDHT command line chat client." << std::endl;
+    std::cout << "Report bugs to: https://opendht.net" << std::endl;
+}
+
+// dhtchat entry point: run a DHT node, then loop on user input.
+// Commands: 'c {hash}' joins a channel (listens on that hash for ImMessage
+// values), 'd' disconnects, 'e {hash} {msg}' sends encrypted to a peer id,
+// any other input while connected is published as a signed message.
+int
+main(int argc, char **argv)
+{
+    auto params = parseArgs(argc, argv);
+    if (params.help) {
+        print_usage();
+        return 0;
+    }
+#ifdef WIN32_NATIVE
+    gnutls_global_init();
+#endif
+
+    DhtRunner dht;
+    try {
+        dht.run(params.port, dht::crypto::generateIdentity("DHT Chat Node"), true, params.network);
+
+        if (params.log) {
+            if (params.syslog)
+                log::enableSyslog(dht, "dhtnode");
+            else if (not params.logfile.empty())
+                log::enableFileLogging(dht, params.logfile);
+            else
+                log::enableLogging(dht);
+        }
+
+        if (not params.bootstrap.first.empty())
+            dht.bootstrap(params.bootstrap.first.c_str(), params.bootstrap.second.c_str());
+
+#if OPENDHT_PROXY_CLIENT
+        if (!params.proxyclient.empty()) {
+            dht.setProxyServer(params.proxyclient);
+            dht.enableProxy(true);
+        }
+#endif //OPENDHT_PROXY_CLIENT
+
+        print_node_info(dht, params);
+        std::cout << " type 'c {hash}' to join a channel" << std::endl << std::endl;
+
+        bool connected {false};
+        InfoHash room;
+        const InfoHash myid = dht.getId();
+
+#ifndef WIN32_NATIVE
+        // using the GNU History API
+        using_history();
+#endif
+
+        while (true)
+        {
+            // using the GNU Readline API
+            std::string line = readLine(connected ? PROMPT : "> ");
+            // NOTE(review): a leading NUL appears to be readLine's EOF
+            // sentinel — confirm against tools_common.h.
+            if (!line.empty() && line[0] == '\0')
+                break;
+            if (line.empty())
+                continue;
+
+            std::istringstream iss(line);
+            std::string op, idstr;
+            iss >> op;
+            if (not connected) {
+                if (op == "x" || op == "q" || op == "exit" || op == "quit")
+                    break;
+                else if (op == "c") {
+                    iss >> idstr;
+                    room = InfoHash(idstr);
+                    // Not a valid hex hash: derive the channel id by hashing
+                    // the given string instead.
+                    if (not room) {
+                        room = InfoHash::get(idstr);
+                        std::cout << "Joining h(" << idstr << ") = " << room << std::endl;
+                    }
+
+                    dht.listen<dht::ImMessage>(room, [&](dht::ImMessage&& msg) {
+                        if (msg.from != myid)
+                            std::cout << msg.from.toString() << " at " << printTime(msg.date)
+                                      << " (took " << print_dt(std::chrono::system_clock::now() - std::chrono::system_clock::from_time_t(msg.date))
+                                      << "s) " << (msg.to == myid ? "ENCRYPTED ":"") << ": " << msg.id << " - " << msg.msg << std::endl;
+                        return true;
+                    });
+                    connected = true;
+                } else {
+                    std::cout << "Unknown command. Type 'c {hash}' to join a channel" << std::endl << std::endl;
+                }
+            } else {
+                auto now = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
+                if (op == "d") {
+                    connected = false;
+                    continue;
+                } else if (op == "e") {
+                    iss >> idstr;
+                    std::getline(iss, line);
+                    dht.putEncrypted(room, InfoHash(idstr), dht::ImMessage(rand_id(rd), std::move(line), now), [](bool ok) {
+                        //dht.cancelPut(room, id);
+                        if (not ok)
+                            std::cout << "Message publishing failed !" << std::endl;
+                    });
+                } else {
+                    // Publish the whole input line (including the first word)
+                    // as a signed message.
+                    dht.putSigned(room, dht::ImMessage(rand_id(rd), std::move(line), now), [](bool ok) {
+                        //dht.cancelPut(room, id);
+                        if (not ok)
+                            std::cout << "Message publishing failed !" << std::endl;
+                    });
+                }
+            }
+        }
+    } catch(const std::exception&e) {
+        std::cerr << std::endl <<  e.what() << std::endl;
+    }
+
+    std::cout << std::endl <<  "Stopping node..." << std::endl;
+    dht.join();
+#ifdef WIN32_NATIVE
+    gnutls_global_deinit();
+#endif
+    return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ *
+ * Authors: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Simon Désaulniers <simon.desaulniers@savoirfairelinux.com>
+ * Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "tools_common.h"
+extern "C" {
+#include <gnutls/gnutls.h>
+}
+
+#include <set>
+#include <thread> // std::this_thread::sleep_for
+
+using namespace dht;
+
// Print the dhtnode command line usage summary and bug-report address
// on standard output.
void print_usage() {
    std::cout << "Usage: dhtnode [-v [-l logfile]] [-i] [-d] [-n network_id] [-p local_port] [-b bootstrap_host[:port]] [--proxyserver local_port]"
              << std::endl << std::endl
              << "dhtnode, a simple OpenDHT command line node runner." << std::endl
              << "Report bugs to: https://opendht.net" << std::endl;
}
+
// Tell the user the requested operation needs a node identity
// (i.e. dhtnode must be restarted with the -i flag).
void print_id_req() {
    static const char* const kMsg =
        "An identity is required to perform this operation (run with -i)";
    std::cout << kMsg << std::endl;
}
+
+void print_node_info(const std::shared_ptr<DhtRunner>& dht, const dht_params& params) {
+ std::cout << "OpenDht node " << dht->getNodeId() << " running on port " << dht->getBoundPort() << std::endl;
+ if (params.generate_identity)
+ std::cout << "Public key ID " << dht->getId() << std::endl;
+}
+
// Print the interactive CLI help: one section per command group, with
// proxy-server / proxy-client / push / indexation sections compiled in
// only when the corresponding build options are enabled.
void print_help() {
    std::cout << "OpenDht command line interface (CLI)" << std::endl;
    std::cout << "Possible commands:" << std::endl
              << " h, help Print this help message." << std::endl
              << " x, quit Quit the program." << std::endl
              << " log Start/stop printing DHT logs." << std::endl;

    std::cout << std::endl << "Node information:" << std::endl
              << " ll Print basic information and stats about the current node." << std::endl
              << " ls [key] Print basic information about current search(es)." << std::endl
              // NOTE(review): "currenty" is a typo in the user-visible string;
              // left unchanged here since this edit only adds comments.
              << " ld [key] Print basic information about currenty stored values on this node (or key)." << std::endl
              << " lr Print the full current routing table of this node." << std::endl;

#if OPENDHT_PROXY_SERVER
    std::cout << std::endl << "Operations with the proxy:" << std::endl
#if OPENDHT_PUSH_NOTIFICATIONS
              << " pst [port] <pushServer> Start the proxy interface on port." << std::endl
#else
              << " pst [port] Start the proxy interface on port." << std::endl
#endif // OPENDHT_PUSH_NOTIFICATIONS
              << " psp [port] Stop the proxy interface on port." << std::endl;
#endif //OPENDHT_PROXY_SERVER

#if OPENDHT_PROXY_CLIENT
    // NOTE(review): this heading duplicates the proxy-server heading above;
    // when both options are enabled two identical "Operations with the proxy:"
    // sections are printed — confirm intended.
    std::cout << std::endl << "Operations with the proxy:" << std::endl
#if OPENDHT_PUSH_NOTIFICATIONS
              << " stt [server_address] <device_key> Start the proxy client." << std::endl
              << " rs [token] Resubscribe to opendht." << std::endl
              << " rp [token] Inject a push notification in Opendht." << std::endl
#else
              << " stt [server_address] Start the proxy client." << std::endl
#endif // OPENDHT_PUSH_NOTIFICATIONS
              << " stp Stop the proxy client." << std::endl;
#endif //OPENDHT_PROXY_CLIENT

    std::cout << std::endl << "Operations on the DHT:" << std::endl
              << " b <ip:port> Ping potential node at given IP address/port." << std::endl
              << " g <key> Get values at <key>." << std::endl
              << " l <key> Listen for value changes at <key>." << std::endl
              << " cl <key> <token> Cancel listen for <token> and <key>." << std::endl
              << " p <key> <str> Put string value at <key>." << std::endl
              << " pp <key> <str> Put string value at <key> (persistent version)." << std::endl
              << " cpp <key> <id> Cancel persistent put operation for <key> and value <id>." << std::endl
              << " s <key> <str> Put string value at <key>, signed with our generated private key." << std::endl
              << " e <key> <dest> <str> Put string value at <key>, encrypted for <dest> with its public key (if found)." << std::endl
              << " cc Trigger connectivity changed signal." << std::endl;

#ifdef OPENDHT_INDEXATION
    std::cout << std::endl << "Indexation operations on the DHT:" << std::endl
              << " il <name> <key> [exact match] Lookup the index named <name> with the key <key>." << std::endl
              << " Set [exact match] to 'false' for inexact match lookup." << std::endl
              << " ii <name> <key> <value> Inserts the value <value> under the key <key> in the index named <name>." << std::endl
              << std::endl;
#endif
}
+
/**
 * Interactive command loop (REPL) of dhtnode.
 *
 * Reads one command per line via readLine() and executes it against the
 * running node until an exit command or end-of-input is received.
 *
 * dht:     the running node, used for all DHT operations.
 * params:  runtime options; params.log is toggled in place by the 'log'
 *          command.
 * proxies: running DhtProxyServer instances indexed by listening port
 *          (compiled in only with OPENDHT_PROXY_SERVER).
 */
void cmd_loop(std::shared_ptr<DhtRunner>& dht, dht_params& params
#if OPENDHT_PROXY_SERVER
        , std::map<in_port_t, std::unique_ptr<DhtProxyServer>>& proxies
#endif
)
{
    print_node_info(dht, params);
    std::cout << " (type 'h' or 'help' for a list of possible commands)" << std::endl << std::endl;

#ifndef WIN32_NATIVE
    // using the GNU History API
    using_history();
#endif

#ifdef OPENDHT_INDEXATION
    // Prefix hash tree indexes created by 'il'/'ii', keyed by index name.
    std::map<std::string, indexation::Pht> indexes;
#endif

    while (true)
    {
        // using the GNU Readline API
        std::string line = readLine();
        // A line whose first character is '\0' is presumably readLine()'s
        // end-of-input sentinel — TODO confirm in tools_common.
        if (!line.empty() && line[0] == '\0')
            break;

        std::istringstream iss(line);
        // NOTE(review): 'deviceKey' is declared but never read in this loop.
        std::string op, idstr, value, index, keystr, pushServer, deviceKey;
        iss >> op;

        // --- commands that take no key: handled here, then 'continue' ---
        if (op == "x" || op == "exit" || op == "quit") {
            break;
        } else if (op == "h" || op == "help") {
            print_help();
            continue;
        } else if (op == "ll") {
            // Node summary plus per-family network stats.
            print_node_info(dht, params);
            std::cout << "IPv4 stats:" << std::endl;
            std::cout << dht->getNodesStats(AF_INET).toString() << std::endl;
            std::cout << "IPv6 stats:" << std::endl;
            std::cout << dht->getNodesStats(AF_INET6).toString() << std::endl;
#if OPENDHT_PROXY_SERVER
            for (const auto& proxy : proxies) {
                std::cout << "Stats for proxy on port " << proxy.first << std::endl;
                std::cout << " " << proxy.second->stats().toString() << std::endl;
            }
#endif
            continue;
        } else if (op == "lr") {
            std::cout << "IPv4 routing table:" << std::endl;
            std::cout << dht->getRoutingTablesLog(AF_INET) << std::endl;
            std::cout << "IPv6 routing table:" << std::endl;
            std::cout << dht->getRoutingTablesLog(AF_INET6) << std::endl;
            continue;
        } else if (op == "ld") {
            // Optional key argument filters the storage dump.
            iss >> idstr;
            InfoHash filter(idstr);
            if (filter)
                std::cout << dht->getStorageLog(filter) << std::endl;
            else
                std::cout << dht->getStorageLog() << std::endl;
            continue;
        } else if (op == "ls") {
            // Optional key argument filters the search dump.
            iss >> idstr;
            InfoHash filter(idstr);
            if (filter)
                std::cout << dht->getSearchLog(filter) << std::endl;
            else
                std::cout << dht->getSearchesLog() << std::endl;
            continue;
        } else if (op == "la") {
            std::cout << "Reported public addresses:" << std::endl;
            auto addrs = dht->getPublicAddressStr();
            for (const auto& addr : addrs)
                std::cout << addr << std::endl;
            continue;
        } else if (op == "b") {
            // Bootstrap from host[:port]; default port when omitted.
            iss >> idstr;
            try {
                auto addr = splitPort(idstr);
                if (not addr.first.empty() and addr.second.empty())
                    addr.second = std::to_string(DHT_DEFAULT_PORT);
                dht->bootstrap(addr.first.c_str(), addr.second.c_str());
            } catch (const std::exception& e) {
                std::cerr << e.what() << std::endl;
            }
            continue;
        } else if (op == "log") {
            // 'log' alone toggles logging; 'log <hash>' enables it filtered
            // on that hash.
            iss >> idstr;
            InfoHash filter(idstr);
            params.log = filter == InfoHash{} ? !params.log : true;
            if (params.log)
                log::enableLogging(*dht);
            else
                log::disableLogging(*dht);
            dht->setLogFilter(filter);
            continue;
        } else if (op == "cc") {
            dht->connectivityChanged();
            continue;
        }
#if OPENDHT_PROXY_SERVER
        else if (op == "pst") {
            // Start a proxy server on the given port (push server address
            // is an extra argument in push-enabled builds).
#if OPENDHT_PUSH_NOTIFICATIONS
            iss >> idstr >> pushServer;
#else
            iss >> idstr;
#endif // OPENDHT_PUSH_NOTIFICATIONS
            try {
                unsigned int port = std::stoi(idstr);
#if OPENDHT_PUSH_NOTIFICATIONS
                proxies.emplace(port, std::unique_ptr<DhtProxyServer>(new DhtProxyServer(dht, port, pushServer)));
#else
                proxies.emplace(port, std::unique_ptr<DhtProxyServer>(new DhtProxyServer(dht, port)));
#endif // OPENDHT_PUSH_NOTIFICATIONS
            } catch (...) { }
            continue;
        } else if (op == "psp") {
            // Stop (destroy) the proxy server bound to the given port.
            iss >> idstr;
            try {
                auto it = proxies.find(std::stoi(idstr));
                if (it != proxies.end())
                    proxies.erase(it);
            } catch (...) { }
            continue;
        }
#endif //OPENDHT_PROXY_SERVER
#if OPENDHT_PROXY_CLIENT
        else if (op == "stt") {
            // NOTE(review): help text documents [server_address] <device_key>
            // arguments for 'stt', but they are not parsed here — the proxy
            // configured at startup is used. Confirm intended.
            dht->enableProxy(true);
            continue;
        } else if (op == "stp") {
            dht->enableProxy(false);
            continue;
        }
#if OPENDHT_PUSH_NOTIFICATIONS
        else if (op == "rp") {
            // Inject a fake push notification, as if received from a
            // push server.
            iss >> value;
            dht->pushNotificationReceived({{"to", "dhtnode"}, {"token", value}});
            continue;
        }
#endif // OPENDHT_PUSH_NOTIFICATIONS
#endif //OPENDHT_PROXY_CLIENT

        if (op.empty())
            continue;

        // --- remaining commands all operate on a key; validate first ---
        static const std::set<std::string> VALID_OPS {"g", "l", "cl", "il", "ii", "p", "pp", "cpp", "s", "e", "a", "q"};
        if (VALID_OPS.find(op) == VALID_OPS.cend()) {
            std::cout << "Unknown command: " << op << std::endl;
            std::cout << " (type 'h' or 'help' for a list of possible commands)" << std::endl;
            continue;
        }
        dht::InfoHash id;

        // 'if (false)' lets every following branch be a uniform 'else if'
        // regardless of which conditional sections are compiled in.
        if (false) {}
#ifdef OPENDHT_INDEXATION
        else if (op == "il" or op == "ii") {
            // Pht syntax: parse index name + key, creating the index on
            // first use. 'index' and 'keystr' are reused by the il/ii
            // handlers further below.
            iss >> index >> keystr;
            auto new_index = std::find_if(indexes.begin(), indexes.end(),
                [&](std::pair<const std::string, indexation::Pht>& i) {
                    return i.first == index;
                }) == indexes.end();
            if (not index.size()) {
                std::cerr << "You must enter the index name." << std::endl;
                continue;
            } else if (new_index) {
                using namespace dht::indexation;
                try {
                    // Derive the key spec (field name -> field width) from
                    // the concrete key, then register the index.
                    auto key = createPhtKey(parseStringMap(keystr));
                    Pht::KeySpec ks;
                    std::transform(key.begin(), key.end(), std::inserter(ks, ks.end()), [](Pht::Key::value_type& f) {
                        return std::make_pair(f.first, f.second.size());
                    });
                    indexes.emplace(index, Pht {index, std::move(ks), dht});
                } catch (std::invalid_argument& e) { std::cout << e.what() << std::endl; }
            }
        }
#endif
        else {
            // Dht syntax: first token is the key. If it is not a literal
            // hash, hash the token to get one.
            iss >> idstr;
            id = dht::InfoHash(idstr);
            if (not id) {
                if (idstr.empty()) {
                    std::cerr << "Syntax error: invalid InfoHash." << std::endl;
                    continue;
                }
                id = InfoHash::get(idstr);
                std::cout << "Using h(" << idstr << ") = " << id << std::endl;
            }
        }

        // Dht
        // 'start' is captured by the async callbacks to report elapsed time.
        auto start = std::chrono::high_resolution_clock::now();
        if (op == "g") {
            // Get values at key; the rest of the line is a Where filter.
            std::string rem;
            std::getline(iss, rem);
            dht->get(id, [start](std::shared_ptr<Value> value) {
                auto now = std::chrono::high_resolution_clock::now();
                std::cout << "Get: found value (after " << print_dt(now-start) << "s)" << std::endl;
                std::cout << "\t" << *value << std::endl;
                return true;
            }, [start](bool ok) {
                auto end = std::chrono::high_resolution_clock::now();
                std::cout << "Get: " << (ok ? "completed" : "failure") << " (took " << print_dt(end-start) << "s)" << std::endl;
            }, {}, dht::Where {std::move(rem)});
        }
        else if (op == "q") {
            // Query field-value indexes; the rest of the line is the Query.
            std::string rem;
            std::getline(iss, rem);
            dht->query(id, [start](const std::vector<std::shared_ptr<FieldValueIndex>>& field_value_indexes) {
                auto now = std::chrono::high_resolution_clock::now();
                for (auto& index : field_value_indexes) {
                    std::cout << "Query: found field value index (after " << print_dt(now-start) << "s)" << std::endl;
                    std::cout << "\t" << *index << std::endl;
                }
                return true;
            }, [start](bool ok) {
                auto end = std::chrono::high_resolution_clock::now();
                std::cout << "Query: " << (ok ? "completed" : "failure") << " (took " << print_dt(end-start) << "s)" << std::endl;
            }, dht::Query {std::move(rem)});
        }
        else if (op == "l") {
            // Listen for value changes; prints the token needed by 'cl'.
            std::string rem;
            std::getline(iss, rem);
            auto token = dht->listen(id, [](const std::vector<std::shared_ptr<Value>>& values, bool expired) {
                std::cout << "Listen: found " << values.size() << " values" << (expired ? " expired" : "") << std::endl;
                for (const auto& value : values)
                    std::cout << "\t" << *value << std::endl;
                return true;
            }, {}, dht::Where {std::move(rem)});
            auto t = token.get();
            std::cout << "Listening, token: " << t << std::endl;
        }
        // NOTE(review): plain 'if' breaks the else-if chain here; behavior is
        // unaffected because the ops are mutually exclusive, but 'else if'
        // would be consistent with the other branches.
        if (op == "cl") {
            std::string rem;
            iss >> rem;
            size_t token;
            try {
                token = std::stoul(rem);
            } catch(...) {
                std::cerr << "Syntax: cl [key] [token]" << std::endl;
                continue;
            }
            dht->cancelListen(id, token);
        }
        else if (op == "p") {
            // One-shot put of a user-data string value.
            std::string v;
            iss >> v;
            dht->put(id, dht::Value {
                dht::ValueType::USER_DATA.id,
                std::vector<uint8_t> {v.begin(), v.end()}
            }, [start](bool ok) {
                auto end = std::chrono::high_resolution_clock::now();
                std::cout << "Put: " << (ok ? "success" : "failure") << " (took " << print_dt(end-start) << "s)" << std::endl;
            });
        }
        else if (op == "pp") {
            // Permanent (persistent) put: value is re-announced until
            // cancelled with 'cpp'; prints the value ID in hex.
            std::string v;
            iss >> v;
            auto value = std::make_shared<dht::Value>(
                dht::ValueType::USER_DATA.id,
                std::vector<uint8_t> {v.begin(), v.end()}
            );
            dht->put(id, value, [start,value](bool ok) {
                auto end = std::chrono::high_resolution_clock::now();
                // Save/restore stream flags around the std::hex output.
                auto flags(std::cout.flags());
                std::cout << "Put: " << (ok ? "success" : "failure") << " (took " << print_dt(end-start) << "s). Value ID: " << std::hex << value->id << std::endl;
                std::cout.flags(flags);
            }, time_point::max(), true);
        }
        else if (op == "cpp") {
            // Cancel a persistent put; the value ID argument is hex.
            std::string rem;
            iss >> rem;
            dht->cancelPut(id, std::stoul(rem, nullptr, 16));
        }
        else if (op == "s") {
            // Signed put: requires a generated identity.
            if (not params.generate_identity) {
                print_id_req();
                continue;
            }
            std::string v;
            iss >> v;
            dht->putSigned(id, dht::Value {
                dht::ValueType::USER_DATA.id,
                std::vector<uint8_t> {v.begin(), v.end()}
            }, [start](bool ok) {
                auto end = std::chrono::high_resolution_clock::now();
                std::cout << "Put signed: " << (ok ? "success" : "failure") << " (took " << print_dt(end-start) << "s)" << std::endl;
            });
        }
        else if (op == "e") {
            // Encrypted put for a destination hash: requires an identity.
            if (not params.generate_identity) {
                print_id_req();
                continue;
            }
            std::string tostr;
            std::string v;
            iss >> tostr >> v;
            dht->putEncrypted(id, InfoHash(tostr), dht::Value {
                dht::ValueType::USER_DATA.id,
                std::vector<uint8_t> {v.begin(), v.end()}
            }, [start](bool ok) {
                auto end = std::chrono::high_resolution_clock::now();
                std::cout << "Put encrypted: " << (ok ? "success" : "failure") << " (took " << print_dt(end-start) << "s)" << std::endl;
            });
        }
        else if (op == "a") {
            // Announce an IP service on the given port at the key.
            in_port_t port;
            iss >> port;
            dht->put(id, dht::Value {dht::IpServiceAnnouncement::TYPE.id, dht::IpServiceAnnouncement(port)}, [start](bool ok) {
                auto end = std::chrono::high_resolution_clock::now();
                std::cout << "Announce: " << (ok ? "success" : "failure") << " (took " << print_dt(end-start) << "s)" << std::endl;
            });
        }
#ifdef OPENDHT_INDEXATION
        else if (op == "il") {
            // Index lookup: uses 'index'/'keystr' parsed above; optional
            // trailing token "false" requests inexact-match lookup.
            std::string exact_match;
            iss >> exact_match;
            try {
                auto key = createPhtKey(parseStringMap(keystr));
                indexes.at(index).lookup(key,
                    [=](std::vector<std::shared_ptr<indexation::Value>>& vals, indexation::Prefix p) {
                        if (vals.empty())
                            return;
                        std::cout << "Pht::lookup: found entries!" << std::endl
                                  << p.toString() << std::endl
                                  << " hash: " << p.hash() << std::endl;
                        std::cout << " entries:" << std::endl;
                        for (auto v : vals)
                            std::cout << " " << v->first.toString() << "[vid: " << v->second << "]" << std::endl;
                    },
                    [start](bool ok) {
                        auto end = std::chrono::high_resolution_clock::now();
                        std::cout << "Pht::lookup: " << (ok ? "done." : "failed.")
                                  << " took " << print_dt(end-start) << "s)" << std::endl;

                    }, exact_match.size() != 0 and exact_match == "false" ? false : true
                );
            }
            catch (std::invalid_argument& e) { std::cout << e.what() << std::endl; }
            // at() throws if the index does not exist; silently ignored.
            catch (std::out_of_range& e) { }
        }
        else if (op == "ii") {
            // Index insert: the value inserted is the hash given after the
            // index name/key parsed above.
            iss >> idstr;
            InfoHash h {idstr};
            if (not isInfoHash(h))
                continue;

            indexation::Value v {h, 0};
            try {
                auto key = createPhtKey(parseStringMap(keystr));
                indexes.at(index).insert(key, v,
                    [=](bool ok) {
                        std::cout << "Pht::insert: " << (ok ? "done." : "failed.") << std::endl;
                    }
                );
            }
            catch (std::invalid_argument& e) { std::cout << e.what() << std::endl; }
            // at() throws if the index does not exist; silently ignored.
            catch (std::out_of_range& e) { }
        }
#endif
    }

    std::cout << std::endl << "Stopping node..." << std::endl;
}
+
// dhtnode entry point: parse options, run a DhtRunner configured from them,
// optionally start a proxy server, then either idle (daemon/service mode)
// or run the interactive command loop; finally shut the node down cleanly.
int
main(int argc, char **argv)
{
#ifdef WIN32_NATIVE
    gnutls_global_init();
#endif

    auto dht = std::make_shared<DhtRunner>();

    try {
        auto params = parseArgs(argc, argv);
        if (params.help) {
            print_usage();
            return 0;
        }

        if (params.daemonize) {
            daemonize();
        } else if (params.service) {
            setupSignals();
        }

        // Generate a transient CA + node certificate when -i was given;
        // needed for signed/encrypted operations.
        dht::crypto::Identity crt {};
        if (params.generate_identity) {
            auto ca_tmp = dht::crypto::generateEcIdentity("DHT Node CA");
            crt = dht::crypto::generateIdentity("DHT Node", ca_tmp);
        }

        dht::DhtRunner::Config config {};
        config.dht_config.node_config.network = params.network;
        config.dht_config.node_config.maintain_storage = false;
        config.dht_config.id = crt;
        config.threaded = true;
        // A non-empty proxyclient address makes the runner operate through
        // that proxy server.
        config.proxy_server = params.proxyclient;
        config.push_node_id = "dhtnode";
        if (not params.proxyclient.empty())
            dht->setPushNotificationToken(params.devicekey);

        dht->run(params.port, config);

        if (params.log) {
            // Priority: syslog (explicit, or daemon without a logfile),
            // then logfile, then stdout.
            if (params.syslog or (params.daemonize and params.logfile.empty()))
                log::enableSyslog(*dht, "dhtnode");
            else if (not params.logfile.empty())
                log::enableFileLogging(*dht, params.logfile);
            else
                log::enableLogging(*dht);
        }

        if (not params.bootstrap.first.empty()) {
            //std::cout << "Bootstrap: " << params.bootstrap.first << ":" << params.bootstrap.second << std::endl;
            dht->bootstrap(params.bootstrap.first.c_str(), params.bootstrap.second.c_str());
        }

#if OPENDHT_PROXY_SERVER
        std::map<in_port_t, std::unique_ptr<DhtProxyServer>> proxies;
#endif
        if (params.proxyserver != 0) {
#if OPENDHT_PROXY_SERVER
            proxies.emplace(params.proxyserver, std::unique_ptr<DhtProxyServer>(new DhtProxyServer(dht, params.proxyserver, params.pushserver)));
#else
            std::cerr << "DHT proxy server requested but OpenDHT built without proxy server support." << std::endl;
            exit(EXIT_FAILURE);
#endif
        }

        if (params.daemonize or params.service)
            // NOTE(review): 'runner' is not declared in this file's visible
            // scope — presumably a global service-loop helper defined in
            // tools_common.h; confirm it exists there.
            while (runner.wait());
        else
            cmd_loop(dht, params
#if OPENDHT_PROXY_SERVER
                , proxies
#endif
            );

    } catch(const std::exception&e) {
        std::cerr << std::endl << e.what() << std::endl;
    }

    // Asynchronous shutdown: wait on a condition variable until the
    // shutdown callback fires, then join the node threads.
    std::condition_variable cv;
    std::mutex m;
    std::atomic_bool done {false};

    dht->shutdown([&]()
    {
        std::lock_guard<std::mutex> lk(m);
        done = true;
        cv.notify_all();
    });

    // wait for shutdown
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [&](){ return done.load(); });

    dht->join();
#ifdef WIN32_NATIVE
    gnutls_global_deinit();
#endif
    return 0;
}
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "tools_common.h"
+#include <opendht/node.h>
+
+extern "C" {
+#include <gnutls/gnutls.h>
+}
+#include <set>
+#include <condition_variable>
+#include <mutex>
+
+using namespace dht;
+
// Print the dhtscanner command line usage summary on standard output.
// Fixes the previously garbled description string
// ("utility generating scan result the network").
void print_usage() {
    std::cout << "Usage: dhtscanner [-n network_id] [-p local_port] [-b bootstrap_host[:port]]" << std::endl << std::endl;
    std::cout << "dhtscanner, a simple OpenDHT command line utility to scan the network." << std::endl;
    std::cout << "Report bugs to: https://opendht.net" << std::endl;
}
+
// Ordering for shared Node pointers: primarily by node ID; for equal IDs,
// an IPv4 node sorts before an IPv6 one. Two nodes with the same ID and
// the same address family compare equivalent, so a set deduplicates them.
struct snode_compare {
    bool operator() (const std::shared_ptr<Node>& lhs, const std::shared_ptr<Node>& rhs) const {
        return (lhs->id < rhs->id) ||
            (lhs->id == rhs->id && lhs->getFamily() == AF_INET && rhs->getFamily() == AF_INET6);
    }
};

// Set of discovered nodes, unique per (id, address family).
using NodeSet = std::set<std::shared_ptr<Node>, snode_compare>;
// Notified by step()'s completion callback each time a pending 'get'
// finishes; main() waits on it until no operations remain.
std::condition_variable cv;
+
// One step of the network crawl: issue a 'get' on cur_h, collect the nodes
// reached, and recursively spawn further steps on deeper hash prefixes
// until the local bucket density suggests no more splitting is useful.
// 'done' counts in-flight operations; each completion notifies the global
// cv so main() can detect when the crawl has finished.
// NOTE(review): the completion lambda captures 'dht' and 'done' by
// reference ([&]); this relies on both outliving every pending callback —
// main() guarantees that by waiting for done == 0 before returning.
void
step(DhtRunner& dht, std::atomic_uint& done, std::shared_ptr<NodeSet> all_nodes, dht::InfoHash cur_h, unsigned cur_depth)
{
    std::cout << "step at " << cur_h << ", depth " << cur_depth << std::endl;
    done++;
    dht.get(cur_h, [all_nodes](const std::vector<std::shared_ptr<Value>>& /*values*/) {
        // Values are irrelevant to the scan; keep the search going.
        return true;
    }, [&,all_nodes,cur_h,cur_depth](bool, const std::vector<std::shared_ptr<Node>>& nodes) {
        all_nodes->insert(nodes.begin(), nodes.end());
        NodeSet sbuck {nodes.begin(), nodes.end()};
        if (not sbuck.empty()) {
            // bdepth: number of leading bits shared by the closest and
            // farthest node returned — an estimate of bucket density.
            unsigned bdepth = sbuck.size()==1 ? 0u : InfoHash::commonBits((*sbuck.begin())->id, (*sbuck.rbegin())->id);
            unsigned target_depth = std::min(8u, bdepth+6u);
            std::cout << cur_h << " : " << nodes.size() << " nodes; target is " << target_depth << " bits deep (cur " << cur_depth << ")" << std::endl;
            // Recurse on every sibling prefix between the current depth and
            // the target depth (flip one bit per branch).
            for (unsigned b = cur_depth ; b < target_depth; b++) {
                auto new_h = cur_h;
                new_h.setBit(b, 1);
                step(dht, done, all_nodes, new_h, b+1);
            }
        }
        done--;
        std::cout << done.load() << " operations left, " << all_nodes->size() << " nodes found." << std::endl;
        cv.notify_one();
    });
}
+
// dhtscanner entry point: run a node, bootstrap, start a recursive crawl
// from hash 0x80...0, wait until all crawl operations complete, then print
// every node found.
int
main(int argc, char **argv)
{
#ifdef WIN32_NATIVE
    gnutls_global_init();
#endif
    auto params = parseArgs(argc, argv);
    if (params.help) {
        print_usage();
        return 0;
    }

    DhtRunner dht;
    try {
        dht.run(params.port, {}, true, params.network);

        if (params.log) {
            // NOTE(review): syslog tag is "dhtnode", not "dhtscanner" —
            // looks like a copy-paste; confirm intended.
            if (params.syslog)
                log::enableSyslog(dht, "dhtnode");
            else if (not params.logfile.empty())
                log::enableFileLogging(dht, params.logfile);
            else
                log::enableLogging(dht);
        }

        if (not params.bootstrap.first.empty())
            dht.bootstrap(params.bootstrap.first.c_str(), params.bootstrap.second.c_str());

        std::cout << "OpenDht node " << dht.getNodeId() << " running on port " << params.port << std::endl;
        std::cout << "Scanning network..." << std::endl;
        auto all_nodes = std::make_shared<NodeSet>();

        // Set hash to 1 because 0 is the null hash
        dht::InfoHash cur_h {};
        cur_h.setBit(8*HASH_LEN-1, 1);

        // Give the node a moment to populate its routing table after
        // bootstrap before starting the crawl.
        std::this_thread::sleep_for(std::chrono::seconds(2));

        std::atomic_uint done {0};
        step(dht, done, all_nodes, cur_h, 0);

        {
            // Wait for every recursive step() to complete. NOTE(review):
            // the mutex is local and not held by the notifier (step() calls
            // cv.notify_one() without locking), so a wakeup can race with
            // the predicate check; the wait relies on repeated
            // notifications — confirm acceptable here.
            std::mutex m;
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [&](){
                return done.load() == 0;
            });
        }

        std::cout << std::endl << "Scan ended: " << all_nodes->size() << " nodes found." << std::endl;
        for (const auto& n : *all_nodes)
            std::cout << "Node " << *n << std::endl;
    } catch(const std::exception&e) {
        std::cerr << std::endl << e.what() << std::endl;
    }

    dht.join();
#ifdef WIN32_NATIVE
    gnutls_global_deinit();
#endif
    return 0;
}
--- /dev/null
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+ <title>OpenDHT tester</title>
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" />
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.css" />
+ <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
+ <script type="text/javascript">"use strict";
+let onGet;
+let onPut;
+let setServer;
// Render a DHT value object for display: its `data` field is
// base64-encoded, so decode it to text.
const valueGetElement = function(o) {
    return window.atob(o.data);
};
// Page setup, run on DOM ready: wires the get/listen form, the put form
// and the proxy-server selection form, and defines the globals
// onGet / onPut / setServer used by the inline onsubmit handlers.
$(function() {
    // In-flight XMLHttpRequest for get/listen, or undefined when idle.
    let request = undefined;
    // Base URL of the selected proxy server, e.g. "http://host:port/".
    let server;
    const getTools = $("#getTools");
    const getBtn = $("#getBtn");
    const getDropdown = $("#getDropdown");
    // NOTE(review): listenBtn is never read afterwards; kept only for the
    // click-handler registration side effect.
    const listenBtn = $("#listenBtn").click(function(){onGet('LISTEN');});
    // Switch the get toolbar into "request running" state.
    const setGetRequest = function() {
        getBtn.button('loading');
        getStopBtn.appendTo(getTools);
        getDropdown.hide();
    }
    // Abort any running request and restore the toolbar's idle state.
    const clearGetRequest = function() {
        if (request === undefined)
            return;
        request.abort();
        request = undefined;
        getStopBtn.detach();
        getDropdown.show();
        getBtn.button('reset');
    }
    // Stop button is kept detached while idle; re-attached by setGetRequest.
    const getStopBtn = $("#getStopBtn").detach().click(clearGetRequest);
    const putBtn = $("#putBtn");
    const result = $("#dhtResult");
    const group = $('<ul class="list-group"/>').appendTo(result);
    // Start a GET or LISTEN request for the key in #getKey. LISTEN keeps
    // the connection open and streams newline-delimited JSON values.
    onGet = function (method) {
        if (request !== undefined)
            return false;
        const input = $("#getKey").val();
        group.empty();
        // Index of the first response line not yet rendered.
        let lastAppended = 0;
        // Timestamp used to stagger the slide-down animations.
        let start = new Date().getTime();
        request = new XMLHttpRequest();
        request.onreadystatechange = function(event) {
            if (this.readyState >= XMLHttpRequest.LOADING) {
                if (this.readyState == XMLHttpRequest.DONE) {
                    clearGetRequest();
                }
                if (this.status === 200) {
                    // One JSON object per line; re-parse only the lines
                    // appended since the last progress event.
                    const elements = this.responseText.split("\n");
                    const elementsLength = elements.length;
                    const now = new Date().getTime();
                    for (let i = lastAppended; i < elementsLength; i++) {
                        const element = elements[i];
                        if (!element || element.length == 0)
                            return;
                        const o = JSON.parse(element);
                        if (o.expired) {
                            // Expired value: remove its list entry.
                            $('#value'+o.id).slideUp(100, (e) => $(e.target).remove());
                        } else {
                            // NOTE(review): 'd' is unused — valueGetElement(o)
                            // below decodes the data again.
                            const d = window.atob(o.data);
                            const delay = Math.max(0, start-now);
                            $('<li class="list-group-item" id="value'+o.id+'"/>').append(valueGetElement(o)).appendTo(group).hide().delay(delay).slideDown(100);
                            lastAppended = i+1;
                            start = Math.max(start, now)+25;
                        }
                    }
                } else if (this.status !== 0) {
                    group.empty().append($('<li class="list-group-item list-group-item-danger"/>').text("Error loading content: " + this.statusText));
                }
            }
        };
        request.onerror = function(event) {
            clearGetRequest();
            group.empty().append($('<li class="list-group-item list-group-item-danger"/>').text("Error loading content."));
        };
        request.open(method, server + input, true);
        request.send(null);
        setGetRequest();
        return false;
    };

    // POST the value in #putValue (base64-encoded) under the key in #getKey.
    onPut = function( ) {
        const key = $("#getKey").val();
        const value = $("#putValue").val();
        $.ajax({
            url: server + key,
            type: 'POST',
            data: JSON.stringify({
                data:window.btoa(value)
            }),
            contentType: 'application/json; charset=utf-8',
            dataType: 'json',
            success: function( result ) {
                putBtn.button('reset');
                //$('<li class="list-group-item list-group-item-success"/>').append(valueGetElement(result)).appendTo(group.empty());
            },
            error: function(result) {
                putBtn.button('reset');
                group.empty().append($('<li class="list-group-item list-group-item-danger"/>').text(result.statusText));
            }
        });
        putBtn.button('loading');
        return false;
    };

    const serverValue = $("#serverValue");
    const serverStatus = $("#serverStatus");
    const serverBtn = $("#serverBtn");
    // Point the page at the proxy server typed in #serverValue and probe it
    // with a GET on its root, showing the node ID on success.
    setServer = function(event) {
        server = 'http://'+serverValue.val() + '/';
        serverStatus.empty();
        serverBtn.button('loading');
        $.getJSON(server, function(data){
            $('<span><b>Node</b> '+data.node_id+'</span>').appendTo(serverStatus).hide().fadeIn();
        }).fail(function(error) {
            serverStatus.html("<div class='alert alert-danger' style='margin-bottom: 0px;'><span class='glyphicon glyphicon-remove' aria-hidden='true'></span> Can't access node</div>");
        }).always(function(error) {
            serverBtn.button('reset');
        });
        return false;
    };
    // Probe the default server on load.
    setServer();
});
+ </script>
+</head>
+<body>
+ <div class="container" style="max-width: 730px;">
+ <header class="page-header">
+ <div class="row">
+ <div class="col-sm-5">
+ <h1>OpenDHT tester</h1>
+ </div>
+ <div class="col-sm-7">
+ <div class="well well-sm" style="margin-top:10px; margin-bottom:0px;">
+ <form id="serverForm" class="form-inline" onsubmit="return setServer();" style="margin-bottom:4px;">
+ <div class="input-group">
+ <input type="text" class="form-control" id="serverValue" placeholder="Proxy server" value="127.0.0.1:8080"/>
+ <span class="input-group-btn">
+ <button id="serverBtn" type="submit" class="btn btn-default" data-loading-text="<i class='fa fa-circle-o-notch fa-spin'></i>"><span class="glyphicon glyphicon-refresh" aria-hidden="true"></span></button>
+ </span>
+ </div>
+ </form>
+ <div id="serverStatus"><i class='fa fa-circle-o-notch fa-spin'></i></div>
+ </div>
+ </div>
+ </div>
+ </header>
+ <div class="panel panel-default" id="dhtResult">
+ <div class="panel-heading">
+ <div class="row">
+ <div class="col-xs-6">
+ <form class="form-inline" onsubmit="return onGet('GET');">
+ <div class="input-group">
+ <input type="text" class="form-control" id="getKey" placeholder="Key" aria-label="Key" />
+ <span class="input-group-btn" id="getTools">
+ <button id="getBtn" class="btn btn-default" data-loading-text="<i class='fa fa-circle-o-notch fa-spin'></i>" type="submit">Get</button>
<button id="getDropdown" type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> <span class="caret"></span> <span class="sr-only">Toggle Dropdown</span> </button>
+ <ul class="dropdown-menu">
+ <li><a id="listenBtn" href="#">Listen</a></li>
+ </ul>
+ <button id="getStopBtn" class="btn btn-default" type="submit"><span class="glyphicon glyphicon-remove" aria-hidden="true"></span></button>
+ </span>
+ </div>
+ </form>
+ </div>
+ <div class="col-xs-6">
+ <form class="form-inline" onsubmit="return onPut();">
+ <div class="input-group">
+ <input type="text" class="form-control input-group-input" id="putValue" placeholder="Value" />
+ <span class="input-group-btn">
+ <button id="putBtn" type="submit" class="btn btn-default" data-loading-text="<i class='fa fa-circle-o-notch fa-spin'></i> Loading">Put</button>
+ </span>
+ </div>
+ </form>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+</body>
+</html>
--- /dev/null
+DHT_ARGS=-b bootstrap.ring.cx -p 4222 -n 16
\ No newline at end of file
--- /dev/null
+[Unit]
+Description=OpenDHT node cluster
+After=network.target
+
+[Service]
+EnvironmentFile=@sysconfdir@/dhtcluster.conf
+ExecStart=@bindir@/dhtcluster -s $DHT_ARGS
+KillMode=process
+Restart=on-failure
+Type=simple
+ProtectSystem=strict
+ProtectHome=yes
+ProtectKernelTunables=yes
+ProtectKernelModules=yes
+ProtectControlGroups=yes
+PrivateDevices=yes
+PrivateUsers=yes
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+DHT_ARGS=-b bootstrap.ring.cx -p 4222 -v
\ No newline at end of file
--- /dev/null
+[Unit]
+Description=OpenDHT standalone node
+After=network.target
+
+[Service]
+EnvironmentFile=@sysconfdir@/dhtnode.conf
+ExecStart=@bindir@/dhtnode -d $DHT_ARGS
+KillMode=process
+Restart=on-failure
+Type=forking
+DynamicUser=yes
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+/*
+ * Copyright (C) 2014-2017 Savoir-faire Linux Inc.
+ *
+ * Author: Adrien Béraud <adrien.beraud@savoirfairelinux.com>
+ * Author: Sébastien Blin <sebastien.blin@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+// Common utility methods used by C++ OpenDHT tools.
+#pragma once
+
#include <opendht.h>
#ifndef WIN32_NATIVE
#include <getopt.h>
#include <readline/readline.h>
#include <readline/history.h>
#else
#define SIGHUP 0
#include "wingetopt.h"
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <signal.h>
#include <unistd.h>

#include <string>
#include <vector>
#include <map>
#include <chrono>
#include <mutex>
#include <condition_variable>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdlib>
+
/*
 * Parses a string of the form:
 *
 *     k1:v1[,k2:v2[,...]]
 *
 * into a key/value map. A key with no value (e.g. "k1,k2:v2" or a
 * trailing "a:1:b") maps to the empty string.
 */
std::map<std::string, std::string> parseStringMap(std::string mapString) {
    std::istringstream keySs(mapString);
    std::string mapStr;
    std::map<std::string, std::string> map;

    while (std::getline(keySs, mapStr, ',')) {
        std::istringstream mapSs(mapStr);
        std::string key, value;

        while (std::getline(mapSs, key, ':')) {
            // Reset before each read: if no value follows the key,
            // std::getline fails and leaves 'value' untouched, which
            // previously leaked the prior pair's value into this key.
            value.clear();
            std::getline(mapSs, value, ':');
            map[key] = value;
        }
    }
    return map;
}
+
#ifdef OPENDHT_INDEXATION
/* Builds a PHT key from a parsed {field: prefix-string} map,
 * converting each string prefix into a dht::Blob. */
dht::indexation::Pht::Key createPhtKey(std::map<std::string, std::string> pht_key_str_map) {
    dht::indexation::Pht::Key pht_key;
    for (const auto& field : pht_key_str_map)
        pht_key.emplace(field.first, dht::Blob {field.second.begin(), field.second.end()});
    return pht_key;
}
#endif
+
+bool isInfoHash(const dht::InfoHash& h) {
+ if (not h) {
+ std::cout << "Syntax error: invalid InfoHash." << std::endl;
+ return false;
+ }
+ return true;
+}
+
// Port appended to a bootstrap address given without one (see parseArgs 'b').
static const constexpr in_port_t DHT_DEFAULT_PORT = 4222;

// Command-line options common to the OpenDHT tools, filled in by parseArgs().
struct dht_params {
    bool help {false}; // print help and exit
    bool log {false}; // enable logging (-v / -L)
    std::string logfile {}; // log file path (-l); empty = no file logging
    bool syslog {false}; // log to syslog instead (-L)
    in_port_t port {0}; // local port to bind (-p); 0 = default behavior
    dht::NetId network {0}; // network id (-n), parsed with strtoul
    bool generate_identity {false}; // generate a crypto identity (-i)
    bool daemonize {false}; // fork to background (-d)
    bool service {false}; // run as a service (-s)
    std::pair<std::string, std::string> bootstrap {}; // bootstrap {host, port} (-b)
    in_port_t proxyserver {0}; // port for the proxy server (--proxyserver); 0 = disabled
    std::string proxyclient {}; // proxy address to connect to as client (--proxyclient)
    std::string pushserver {}; // push server address (--pushserver)
    std::string devicekey {}; // device key (--devicekey)
};
+
// getopt_long option table. Each entry's 'val' is the character returned to
// the switch in parseArgs(); keep this table and parseArgs' option string
// and switch cases in sync when adding options.
static const constexpr struct option long_options[] = {
    {"help", no_argument , nullptr, 'h'},
    {"port", required_argument, nullptr, 'p'},
    {"net", required_argument, nullptr, 'n'},
    {"bootstrap", required_argument, nullptr, 'b'},
    {"identity", no_argument , nullptr, 'i'},
    {"verbose", no_argument , nullptr, 'v'},
    {"daemonize", no_argument , nullptr, 'd'},
    {"service", no_argument , nullptr, 's'},
    {"logfile", required_argument, nullptr, 'l'},
    {"syslog", no_argument , nullptr, 'L'},
    {"proxyserver",required_argument, nullptr, 'S'},
    {"proxyclient",required_argument, nullptr, 'C'},
    {"pushserver", required_argument, nullptr, 'P'},
    {"devicekey", required_argument, nullptr, 'D'},
    {nullptr, 0 , nullptr, 0} // table terminator required by getopt_long
};
+
+dht_params
+parseArgs(int argc, char **argv) {
+ dht_params params;
+ int opt;
+ while ((opt = getopt_long(argc, argv, "hidsvp:n:b:l:", long_options, nullptr)) != -1) {
+ switch (opt) {
+ case 'p': {
+ int port_arg = atoi(optarg);
+ if (port_arg >= 0 && port_arg < 0x10000)
+ params.port = port_arg;
+ else
+ std::cout << "Invalid port: " << port_arg << std::endl;
+ }
+ break;
+ case 'S': {
+ int port_arg = atoi(optarg);
+ if (port_arg >= 0 && port_arg < 0x10000)
+ params.proxyserver = port_arg;
+ else
+ std::cout << "Invalid port: " << port_arg << std::endl;
+ }
+ break;
+ case 'P':
+ params.pushserver = optarg;
+ break;
+ case 'C':
+ params.proxyclient = optarg;
+ break;
+ case 'D':
+ params.devicekey = optarg;
+ break;
+ case 'n':
+ params.network = strtoul(optarg, nullptr, 0);
+ break;
+ case 'b':
+ params.bootstrap = dht::splitPort((optarg[0] == '=') ? optarg+1 : optarg);
+ if (not params.bootstrap.first.empty() and params.bootstrap.second.empty()) {
+ params.bootstrap.second = std::to_string(DHT_DEFAULT_PORT);
+ }
+ break;
+ case 'h':
+ params.help = true;
+ break;
+ case 'l':
+ params.logfile = optarg;
+ break;
+ case 'L':
+ params.log = true;
+ params.syslog = true;
+ break;
+ case 'v':
+ params.log = true;
+ break;
+ case 'i':
+ params.generate_identity = true;
+ break;
+ case 'd':
+ params.daemonize = true;
+ break;
+ case 's':
+ params.service = true;
+ break;
+ default:
+ break;
+ }
+ }
+ return params;
+}
+
+static const constexpr char* PROMPT = ">> ";
+
+std::string
+readLine(const char* prefix = PROMPT)
+{
+#ifndef WIN32_NATIVE
+ const char* line_read = readline(prefix);
+ if (line_read && *line_read)
+ add_history(line_read);
+
+#else
+ char line_read[512];
+ std::cout << PROMPT;
+ fgets(line_read, 512 , stdin);
+#endif
+ return line_read ? std::string(line_read) : std::string("\0", 1);
+}
+
/**
 * Blocks the calling thread until kill() is invoked (typically from a
 * signal handler), then reports that the service should stop.
 */
struct ServiceRunner {
    // Waits for a termination request; returns false once one arrived.
    bool wait() {
        std::unique_lock<std::mutex> lock(mtx_);
        while (not stop_requested_)
            cond_.wait(lock);
        return not stop_requested_;
    }
    // Requests termination and wakes every waiter.
    void kill() {
        std::lock_guard<std::mutex> lock(mtx_);
        stop_requested_ = true;
        cond_.notify_all();
    }
private:
    std::condition_variable cond_;
    std::mutex mtx_;
    bool stop_requested_ = false;
};
+
+ServiceRunner runner;
+
+void signal_handler(int sig)
+{
+ switch(sig) {
+ case SIGHUP:
+ break;
+ case SIGINT:
+ case SIGTERM:
+ runner.kill();
+ break;
+ }
+}
+
+void setupSignals()
+{
+#ifndef WIN32_NATIVE
+ signal(SIGCHLD,SIG_IGN); /* ignore child */
+ signal(SIGTSTP,SIG_IGN); /* ignore tty signals */
+ signal(SIGTTOU,SIG_IGN);
+ signal(SIGTTIN,SIG_IGN);
+ signal(SIGHUP,signal_handler); /* catch hangup signal */
+ signal(SIGINT,signal_handler); /* catch interrupt signal */
+ signal(SIGTERM,signal_handler); /* catch kill signal */
+#endif
+}
+
// Detaches the process from its controlling terminal and runs it in the
// background as a classic Unix daemon. No-op on native Windows builds.
// The parent process exits; only the forked child returns from this call.
void daemonize()
{
#ifndef WIN32_NATIVE
    pid_t pid = fork();
    if (pid < 0) exit(EXIT_FAILURE); // fork failed
    if (pid > 0) exit(EXIT_SUCCESS); // parent: done, child carries on

    // Clear the file mode creation mask so files get explicit permissions.
    umask(0);

    // Become session leader to detach from the controlling terminal.
    pid_t sid = setsid();
    if (sid < 0) {
        exit(EXIT_FAILURE);
    }

    // Run from "/" so the daemon doesn't pin any mount point.
    if ((chdir("/")) < 0) {
        exit(EXIT_FAILURE);
    }

    // A daemon has no terminal: release the standard descriptors.
    close(STDIN_FILENO);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);

    setupSignals();
#endif
}