diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 2d995edada4..9a1963b2904 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,9 +3,6 @@ contact_links: - name: XRP Ledger Documentation url: https://xrpl.org/ about: All things about XRPL - - name: General question for the community - url: https://forum.xpring.io/c/community/ - about: Please ask and answer questions here. - name: Security bug bounty program url: https://ripple.com/bug-bounty/ about: Please report security-relevant bugs in our software here. diff --git a/.gitignore b/.gitignore index 23d896b8e85..5c8b2443700 100644 --- a/.gitignore +++ b/.gitignore @@ -104,3 +104,5 @@ Builds/VisualStudio2015/*.sdf CMakeSettings.json compile_commands.json .clangd +packages +pkg_out diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index e0c627e2bd4..c01077a5aa2 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -395,7 +395,7 @@ target_sources (rippled PRIVATE src/ripple/app/paths/Pathfinder.cpp src/ripple/app/paths/RippleCalc.cpp src/ripple/app/paths/RippleLineCache.cpp - src/ripple/app/paths/RippleState.cpp + src/ripple/app/paths/TrustLine.cpp src/ripple/app/paths/impl/BookStep.cpp src/ripple/app/paths/impl/DirectStep.cpp src/ripple/app/paths/impl/PaySteps.cpp @@ -733,6 +733,7 @@ if (tests) src/test/basics/contract_test.cpp src/test/basics/FeeUnits_test.cpp src/test/basics/hardened_hash_test.cpp + src/test/basics/join_test.cpp src/test/basics/mulDiv_test.cpp src/test/basics/tagged_integer_test.cpp #[===============================[ @@ -891,6 +892,7 @@ if (tests) src/test/protocol/InnerObjectFormats_test.cpp src/test/protocol/Issue_test.cpp src/test/protocol/KnownFormatToGRPC_test.cpp + src/test/protocol/Hooks_test.cpp src/test/protocol/PublicKey_test.cpp src/test/protocol/Quality_test.cpp src/test/protocol/STAccount_test.cpp @@ -989,17 +991,18 @@ if (is_ci) 
target_compile_definitions(rippled PRIVATE RIPPLED_RUNNING_IN_CI) endif () -if (reporting) - target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING) -endif () +if(reporting) +set_target_properties(rippled PROPERTIES OUTPUT_NAME rippled-reporting) +get_target_property(BIN_NAME rippled OUTPUT_NAME) +message(STATUS "Reporting mode build: rippled renamed ${BIN_NAME}") + target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING) +endif() -if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.16) - # any files that don't play well with unity should be added here - if (tests) - set_source_files_properties( - # these two seem to produce conflicts in beast teardown template methods - src/test/rpc/ValidatorRPC_test.cpp - src/test/rpc/ShardArchiveHandler_test.cpp - PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE) - endif () #tests -endif () +# any files that don't play well with unity should be added here +if (tests) + set_source_files_properties( + # these two seem to produce conflicts in beast teardown template methods + src/test/rpc/ValidatorRPC_test.cpp + src/test/rpc/ShardArchiveHandler_test.cpp + PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE) +endif () #tests diff --git a/Builds/CMake/RippledRelease.cmake b/Builds/CMake/RippledRelease.cmake index b10bf6cf023..3be93658255 100644 --- a/Builds/CMake/RippledRelease.cmake +++ b/Builds/CMake/RippledRelease.cmake @@ -48,12 +48,15 @@ if (is_root_project) Builds/containers/centos-builder/Dockerfile Builds/containers/centos-builder/centos_setup.sh Builds/containers/centos-builder/extras.sh - Builds/containers/shared/build_deps.sh - Builds/containers/shared/rippled.service - Builds/containers/shared/update_sources.sh Builds/containers/shared/update-rippled.sh + Builds/containers/shared/update_sources.sh + Builds/containers/shared/rippled.service + Builds/containers/shared/rippled-reporting.service + Builds/containers/shared/build_deps.sh Builds/containers/packaging/rpm/rippled.spec Builds/containers/packaging/rpm/build_rpm.sh + 
Builds/containers/packaging/rpm/50-rippled.preset + Builds/containers/packaging/rpm/50-rippled-reporting.preset bin/getRippledInfo ) exclude_from_default (rpm_container) @@ -86,7 +89,7 @@ if (is_root_project) add_custom_target (dpkg_container docker build --pull - --build-arg DIST_TAG=16.04 + --build-arg DIST_TAG=18.04 --build-arg GIT_COMMIT=${commit_hash} -t rippled-dpkg-builder:${container_label} $<$:--cache-from=${dpkg_cache_from}> @@ -96,28 +99,40 @@ if (is_root_project) USES_TERMINAL COMMAND_EXPAND_LISTS SOURCES - Builds/containers/ubuntu-builder/Dockerfile - Builds/containers/ubuntu-builder/ubuntu_setup.sh - Builds/containers/shared/build_deps.sh - Builds/containers/shared/rippled.service - Builds/containers/shared/update_sources.sh - Builds/containers/shared/update-rippled.sh - Builds/containers/packaging/dpkg/build_dpkg.sh - Builds/containers/packaging/dpkg/debian/README.Debian - Builds/containers/packaging/dpkg/debian/conffiles - Builds/containers/packaging/dpkg/debian/control + Builds/containers/packaging/dpkg/debian/rippled-reporting.links Builds/containers/packaging/dpkg/debian/copyright - Builds/containers/packaging/dpkg/debian/dirs - Builds/containers/packaging/dpkg/debian/docs - Builds/containers/packaging/dpkg/debian/rippled-dev.install - Builds/containers/packaging/dpkg/debian/rippled.install + Builds/containers/packaging/dpkg/debian/rules + Builds/containers/packaging/dpkg/debian/rippled-reporting.install + Builds/containers/packaging/dpkg/debian/rippled-reporting.postinst Builds/containers/packaging/dpkg/debian/rippled.links + Builds/containers/packaging/dpkg/debian/rippled.prerm Builds/containers/packaging/dpkg/debian/rippled.postinst + Builds/containers/packaging/dpkg/debian/rippled-dev.install + Builds/containers/packaging/dpkg/debian/dirs Builds/containers/packaging/dpkg/debian/rippled.postrm + Builds/containers/packaging/dpkg/debian/rippled.conffiles + Builds/containers/packaging/dpkg/debian/compat + 
Builds/containers/packaging/dpkg/debian/source/format + Builds/containers/packaging/dpkg/debian/source/local-options + Builds/containers/packaging/dpkg/debian/README.Debian + Builds/containers/packaging/dpkg/debian/rippled.install Builds/containers/packaging/dpkg/debian/rippled.preinst - Builds/containers/packaging/dpkg/debian/rippled.prerm - Builds/containers/packaging/dpkg/debian/rules + Builds/containers/packaging/dpkg/debian/docs + Builds/containers/packaging/dpkg/debian/control + Builds/containers/packaging/dpkg/debian/rippled-reporting.dirs + Builds/containers/packaging/dpkg/build_dpkg.sh + Builds/containers/ubuntu-builder/Dockerfile + Builds/containers/ubuntu-builder/ubuntu_setup.sh bin/getRippledInfo + Builds/containers/shared/install_cmake.sh + Builds/containers/shared/install_boost.sh + Builds/containers/shared/update-rippled.sh + Builds/containers/shared/update_sources.sh + Builds/containers/shared/build_deps.sh + Builds/containers/shared/rippled.service + Builds/containers/shared/rippled-reporting.service + Builds/containers/shared/rippled-logrotate + Builds/containers/shared/update-rippled-cron ) exclude_from_default (dpkg_container) add_custom_target (dpkg @@ -187,4 +202,3 @@ if (is_root_project) message (STATUS "docker NOT found -- won't be able to build containers for packaging") endif () endif () - diff --git a/Builds/CMake/RippledSettings.cmake b/Builds/CMake/RippledSettings.cmake index 5fcc9441a84..0cdfb8e84db 100644 --- a/Builds/CMake/RippledSettings.cmake +++ b/Builds/CMake/RippledSettings.cmake @@ -10,13 +10,8 @@ option (tests "Build tests" ON) option (unity "Creates a build using UNITY support in cmake. 
This is the default" ON) if (unity) - if (CMAKE_VERSION VERSION_LESS 3.16) - message (WARNING "unity option only supported for with cmake 3.16+ (please upgrade)") - set (unity OFF CACHE BOOL "unity only available for cmake 3.16+" FORCE) - else () - if (NOT is_ci) - set (CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "") - endif () + if (NOT is_ci) + set (CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "") endif () endif () if (is_gcc OR is_clang) diff --git a/Builds/CMake/RippledValidatorKeys.cmake b/Builds/CMake/RippledValidatorKeys.cmake index 2cf71f22365..dfd56f9f182 100644 --- a/Builds/CMake/RippledValidatorKeys.cmake +++ b/Builds/CMake/RippledValidatorKeys.cmake @@ -1,6 +1,6 @@ option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF) -if (validator_keys AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.11) +if (validator_keys) git_branch (current_branch) # default to tracking VK develop branch unless we are on master/release if (NOT (current_branch STREQUAL "master" OR current_branch STREQUAL "release")) @@ -20,5 +20,3 @@ if (validator_keys AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.11) endif () add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys) endif () - - diff --git a/Builds/CMake/deps/Libarchive.cmake b/Builds/CMake/deps/Libarchive.cmake index 760f6403afd..57b8d2e395b 100644 --- a/Builds/CMake/deps/Libarchive.cmake +++ b/Builds/CMake/deps/Libarchive.cmake @@ -125,7 +125,7 @@ if (local_libarchive) --build . --config $ --target archive_static - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} $<$: COMMAND ${CMAKE_COMMAND} -E copy diff --git a/Builds/CMake/deps/Lz4.cmake b/Builds/CMake/deps/Lz4.cmake index f5e6fa5acf0..15d890692c5 100644 --- a/Builds/CMake/deps/Lz4.cmake +++ b/Builds/CMake/deps/Lz4.cmake @@ -43,7 +43,7 @@ else() --build . 
--config $ --target lz4_static - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} $<$: COMMAND ${CMAKE_COMMAND} -E copy diff --git a/Builds/CMake/deps/Nudb.cmake b/Builds/CMake/deps/Nudb.cmake index 8bdb0c06f89..73ab58898a9 100644 --- a/Builds/CMake/deps/Nudb.cmake +++ b/Builds/CMake/deps/Nudb.cmake @@ -8,40 +8,24 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build add_library (nudb INTERFACE) - if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.11) - FetchContent_Declare( - nudb_src - GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git - GIT_TAG 2.0.5 - ) - FetchContent_GetProperties(nudb_src) - if(NOT nudb_src_POPULATED) - message (STATUS "Pausing to download NuDB...") - FetchContent_Populate(nudb_src) - endif() - else () - ExternalProject_Add (nudb_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git - GIT_TAG 2.0.5 - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - TEST_COMMAND "" - INSTALL_COMMAND "" - ) - ExternalProject_Get_Property (nudb_src SOURCE_DIR) - set (nudb_src_SOURCE_DIR "${SOURCE_DIR}") - file (MAKE_DIRECTORY ${nudb_src_SOURCE_DIR}/include) - add_dependencies (nudb nudb_src) - endif () + FetchContent_Declare( + nudb_src + GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git + GIT_TAG 2.0.5 + ) + FetchContent_GetProperties(nudb_src) + if(NOT nudb_src_POPULATED) + message (STATUS "Pausing to download NuDB...") + FetchContent_Populate(nudb_src) + endif() +endif () - file(TO_CMAKE_PATH "${nudb_src_SOURCE_DIR}" nudb_src_SOURCE_DIR) +file(TO_CMAKE_PATH "${nudb_src_SOURCE_DIR}" nudb_src_SOURCE_DIR) # specify as system includes so as to avoid warnings - target_include_directories (nudb SYSTEM INTERFACE ${nudb_src_SOURCE_DIR}/include) - target_link_libraries (nudb - INTERFACE - Boost::thread - Boost::system) - add_library (NIH::nudb ALIAS nudb) - target_link_libraries (ripple_libs INTERFACE NIH::nudb) -endif () +target_include_directories (nudb SYSTEM INTERFACE 
${nudb_src_SOURCE_DIR}/include) +target_link_libraries (nudb + INTERFACE + Boost::thread + Boost::system) +add_library (NIH::nudb ALIAS nudb) +target_link_libraries (ripple_libs INTERFACE NIH::nudb) diff --git a/Builds/CMake/deps/Protobuf.cmake b/Builds/CMake/deps/Protobuf.cmake index 52cbd3f5ab9..35d5b9f0ff7 100644 --- a/Builds/CMake/deps/Protobuf.cmake +++ b/Builds/CMake/deps/Protobuf.cmake @@ -65,7 +65,7 @@ if (local_protobuf OR NOT (Protobuf_FOUND AND Protobuf_PROTOC_EXECUTABLE AND pro ${CMAKE_COMMAND} --build . --config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} TEST_COMMAND "" INSTALL_COMMAND ${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $ --target install diff --git a/Builds/CMake/deps/Rocksdb.cmake b/Builds/CMake/deps/Rocksdb.cmake index 6a33a8d2b57..2c832c593f5 100644 --- a/Builds/CMake/deps/Rocksdb.cmake +++ b/Builds/CMake/deps/Rocksdb.cmake @@ -136,7 +136,7 @@ if (local_rocksdb) ${CMAKE_COMMAND} --build . --config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} $<$: COMMAND ${CMAKE_COMMAND} -E copy diff --git a/Builds/CMake/deps/Snappy.cmake b/Builds/CMake/deps/Snappy.cmake index 9792bec1b5e..331ac2fbe95 100644 --- a/Builds/CMake/deps/Snappy.cmake +++ b/Builds/CMake/deps/Snappy.cmake @@ -42,7 +42,7 @@ else() ${CMAKE_COMMAND} --build . --config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} $<$: COMMAND ${CMAKE_COMMAND} -E copy diff --git a/Builds/CMake/deps/Soci.cmake b/Builds/CMake/deps/Soci.cmake index 4015a3f2dea..fa05a115756 100644 --- a/Builds/CMake/deps/Soci.cmake +++ b/Builds/CMake/deps/Soci.cmake @@ -113,7 +113,7 @@ else() ${CMAKE_COMMAND} --build . 
--config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} $<$: COMMAND ${CMAKE_COMMAND} -E copy diff --git a/Builds/CMake/deps/Sqlite.cmake b/Builds/CMake/deps/Sqlite.cmake index a2d0bad5ac7..7b34c1121f2 100644 --- a/Builds/CMake/deps/Sqlite.cmake +++ b/Builds/CMake/deps/Sqlite.cmake @@ -56,7 +56,7 @@ else() ${CMAKE_COMMAND} --build . --config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} $<$: COMMAND ${CMAKE_COMMAND} -E copy diff --git a/Builds/CMake/deps/cassandra.cmake b/Builds/CMake/deps/cassandra.cmake index ca19bd528ff..8f1e799dc18 100644 --- a/Builds/CMake/deps/cassandra.cmake +++ b/Builds/CMake/deps/cassandra.cmake @@ -112,6 +112,8 @@ if(reporting) -DLIBUV_LIBARY=${BINARY_DIR}/libuv_a.a -DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include -DCASS_BUILD_STATIC=ON + -DCASS_BUILD_SHARED=OFF + -DOPENSSL_ROOT_DIR=/opt/local/openssl INSTALL_COMMAND "" BUILD_BYPRODUCTS /${ep_lib_prefix}cassandra_static.a LOG_BUILD TRUE diff --git a/Builds/CMake/deps/date.cmake b/Builds/CMake/deps/date.cmake index 0f15e7d181c..b9155c26475 100644 --- a/Builds/CMake/deps/date.cmake +++ b/Builds/CMake/deps/date.cmake @@ -9,41 +9,10 @@ find_package (date QUIET) if (NOT TARGET date::date) - if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.14) - FetchContent_Declare( - hh_date_src - GIT_REPOSITORY https://github.com/HowardHinnant/date.git - GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829 - ) - FetchContent_MakeAvailable(hh_date_src) - else () - ExternalProject_Add (hh_date_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/HowardHinnant/date.git - GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829 - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - TEST_COMMAND "" - INSTALL_COMMAND "" - ) - ExternalProject_Get_Property (hh_date_src SOURCE_DIR) - set (hh_date_src_SOURCE_DIR "${SOURCE_DIR}") - file (MAKE_DIRECTORY ${hh_date_src_SOURCE_DIR}/include) - add_library (date_interface INTERFACE) - add_library (date::date ALIAS date_interface) - add_dependencies (date_interface 
hh_date_src) - file (TO_CMAKE_PATH "${hh_date_src_SOURCE_DIR}" hh_date_src_SOURCE_DIR) - target_include_directories (date_interface - SYSTEM INTERFACE - $ - $) - install ( - FILES - ${hh_date_src_SOURCE_DIR}/include/date/date.h - DESTINATION include/date) - install (TARGETS date_interface - EXPORT RippleExports - INCLUDES DESTINATION include) - endif () + FetchContent_Declare( + hh_date_src + GIT_REPOSITORY https://github.com/HowardHinnant/date.git + GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829 + ) + FetchContent_MakeAvailable(hh_date_src) endif () - diff --git a/Builds/CMake/deps/gRPC.cmake b/Builds/CMake/deps/gRPC.cmake index 92a75107140..8dd09417563 100644 --- a/Builds/CMake/deps/gRPC.cmake +++ b/Builds/CMake/deps/gRPC.cmake @@ -112,7 +112,7 @@ else () ${CMAKE_COMMAND} --build . --config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} TEST_COMMAND "" INSTALL_COMMAND ${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $ --target install @@ -169,7 +169,7 @@ else () ${CMAKE_COMMAND} --build . --config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} TEST_COMMAND "" INSTALL_COMMAND ${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $ --target install @@ -237,7 +237,7 @@ else () ${CMAKE_COMMAND} --build . --config $ - $<$:--parallel ${ep_procs}> + --parallel ${ep_procs} $<$: COMMAND ${CMAKE_COMMAND} -E copy diff --git a/Builds/containers/gitlab-ci/build_package.sh b/Builds/containers/gitlab-ci/build_package.sh index 9d815be4572..4c591677fd2 100644 --- a/Builds/containers/gitlab-ci/build_package.sh +++ b/Builds/containers/gitlab-ci/build_package.sh @@ -22,6 +22,7 @@ time cmake \ -Dpackages_only=ON \ -Dcontainer_label="${container_tag}" \ -Dhave_package_container=ON \ - -DCMAKE_VERBOSE_MAKEFILE=OFF \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ + -Dunity=OFF \ -G Ninja ../.. -time cmake --build . --target ${pkgtype} +time cmake --build . 
--target ${pkgtype} -- -v diff --git a/Builds/containers/gitlab-ci/docker_alpine_setup.sh b/Builds/containers/gitlab-ci/docker_alpine_setup.sh index 00cf6eb5fa5..a36543192d1 100644 --- a/Builds/containers/gitlab-ci/docker_alpine_setup.sh +++ b/Builds/containers/gitlab-ci/docker_alpine_setup.sh @@ -1,5 +1,5 @@ #!/usr/bin/env sh -set -ex +set -e # used as a before/setup script for docker steps in gitlab-ci # expects to be run in standard alpine/dind image echo $(nproc) @@ -13,4 +13,3 @@ apk add \ pip3 install awscli # list curdir contents to build log: ls -la - diff --git a/Builds/containers/gitlab-ci/push_to_artifactory.sh b/Builds/containers/gitlab-ci/push_to_artifactory.sh index fd7f3575f91..847c2bc1427 100644 --- a/Builds/containers/gitlab-ci/push_to_artifactory.sh +++ b/Builds/containers/gitlab-ci/push_to_artifactory.sh @@ -1,5 +1,5 @@ #!/usr/bin/env sh -set -ex +set -e action=$1 filter=$2 diff --git a/Builds/containers/gitlab-ci/smoketest.sh b/Builds/containers/gitlab-ci/smoketest.sh index e0f4f432beb..b233e6959c7 100644 --- a/Builds/containers/gitlab-ci/smoketest.sh +++ b/Builds/containers/gitlab-ci/smoketest.sh @@ -1,5 +1,5 @@ #!/usr/bin/env sh -set -ex +set -e install_from=$1 use_private=${2:-0} # this option not currently needed by any CI scripts, # reserved for possible future use diff --git a/Builds/containers/gitlab-ci/tag_docker_image.sh b/Builds/containers/gitlab-ci/tag_docker_image.sh index e9d4fbb6378..66259059536 100644 --- a/Builds/containers/gitlab-ci/tag_docker_image.sh +++ b/Builds/containers/gitlab-ci/tag_docker_image.sh @@ -1,5 +1,5 @@ #!/usr/bin/env sh -set -ex +set -e docker login -u rippled \ -p ${ARTIFACTORY_DEPLOY_KEY_RIPPLED} "${ARTIFACTORY_HUB}" # this gives us rippled_version : @@ -19,4 +19,3 @@ for label in ${rippled_version} latest ; do docker push \ "${ARTIFACTORY_HUB}/${DPKG_CONTAINER_NAME}:${label}_${CI_COMMIT_REF_SLUG}" done - diff --git a/Builds/containers/packaging/dpkg/build_dpkg.sh 
b/Builds/containers/packaging/dpkg/build_dpkg.sh index f407f6bc39d..3c73f1314e2 100755 --- a/Builds/containers/packaging/dpkg/build_dpkg.sh +++ b/Builds/containers/packaging/dpkg/build_dpkg.sh @@ -4,7 +4,7 @@ set -ex # make sure pkg source files are up to date with repo cd /opt/rippled_bld/pkg cp -fpru rippled/Builds/containers/packaging/dpkg/debian/. debian/ -cp -fpu rippled/Builds/containers/shared/rippled.service debian/ +cp -fpu rippled/Builds/containers/shared/rippled*.service debian/ cp -fpu rippled/Builds/containers/shared/update_sources.sh . source update_sources.sh @@ -52,14 +52,15 @@ rc=$?; if [[ $rc != 0 ]]; then error "error building dpkg" fi cd .. -ls -latr # copy artifacts cp rippled-dev_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR} +cp rippled-reporting_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR} cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR} cp rippled_${RIPPLED_DPKG_FULL_VERSION}.dsc ${PKG_OUTDIR} # dbgsym suffix is ddeb under newer debuild, but just deb under earlier cp rippled-dbgsym_${RIPPLED_DPKG_FULL_VERSION}_amd64.* ${PKG_OUTDIR} +cp rippled-reporting-dbgsym_${RIPPLED_DPKG_FULL_VERSION}_amd64.* ${PKG_OUTDIR} cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.changes ${PKG_OUTDIR} cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.build ${PKG_OUTDIR} cp rippled_${RIPPLED_DPKG_VERSION}.orig.tar.gz ${PKG_OUTDIR} @@ -81,15 +82,20 @@ DEB_SHA256=$(cat shasums | \ grep "rippled_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1) DBG_SHA256=$(cat shasums | \ grep "rippled-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1) +REPORTING_DBG_SHA256=$(cat shasums | \ + grep "rippled-reporting-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1) DEV_SHA256=$(cat shasums | \ grep "rippled-dev_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1) +REPORTING_SHA256=$(cat shasums | \ + grep "rippled-reporting_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1) SRC_SHA256=$(cat shasums | \ grep 
"rippled_${RIPPLED_DPKG_VERSION}.orig.tar.gz" | cut -d " " -f 1) echo "deb_sha256=${DEB_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "dbg_sha256=${DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "dev_sha256=${DEV_SHA256}" >> ${PKG_OUTDIR}/build_vars +echo "reporting_sha256=${REPORTING_SHA256}" >> ${PKG_OUTDIR}/build_vars +echo "reporting_dbg_sha256=${REPORTING_DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "src_sha256=${SRC_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "rippled_version=${RIPPLED_VERSION}" >> ${PKG_OUTDIR}/build_vars echo "dpkg_version=${RIPPLED_DPKG_VERSION}" >> ${PKG_OUTDIR}/build_vars echo "dpkg_full_version=${RIPPLED_DPKG_FULL_VERSION}" >> ${PKG_OUTDIR}/build_vars - diff --git a/Builds/containers/packaging/dpkg/debian/compat b/Builds/containers/packaging/dpkg/debian/compat index ec635144f60..f599e28b8ab 100644 --- a/Builds/containers/packaging/dpkg/debian/compat +++ b/Builds/containers/packaging/dpkg/debian/compat @@ -1 +1 @@ -9 +10 diff --git a/Builds/containers/packaging/dpkg/debian/control b/Builds/containers/packaging/dpkg/debian/control index 1b380569e02..7e55143334c 100644 --- a/Builds/containers/packaging/dpkg/debian/control +++ b/Builds/containers/packaging/dpkg/debian/control @@ -12,6 +12,12 @@ Multi-Arch: foreign Depends: ${misc:Depends}, ${shlibs:Depends} Description: rippled daemon +Package: rippled-reporting +Architecture: any +Multi-Arch: foreign +Depends: ${misc:Depends}, ${shlibs:Depends} +Description: rippled reporting daemon + Package: rippled-dev Section: devel Recommends: rippled (= ${binary:Version}) diff --git a/Builds/containers/packaging/dpkg/debian/rippled-reporting.dirs b/Builds/containers/packaging/dpkg/debian/rippled-reporting.dirs new file mode 100644 index 00000000000..0f5cdbd40a7 --- /dev/null +++ b/Builds/containers/packaging/dpkg/debian/rippled-reporting.dirs @@ -0,0 +1,3 @@ +/var/log/rippled-reporting/ +/var/lib/rippled-reporting/ +/etc/systemd/system/rippled-reporting.service.d/ \ No newline at end of file diff --git 
a/Builds/containers/packaging/dpkg/debian/rippled-reporting.install b/Builds/containers/packaging/dpkg/debian/rippled-reporting.install new file mode 100644 index 00000000000..255c7b0b5c4 --- /dev/null +++ b/Builds/containers/packaging/dpkg/debian/rippled-reporting.install @@ -0,0 +1,8 @@ +bld/rippled-reporting/rippled-reporting opt/rippled-reporting/bin +cfg/rippled-reporting.cfg opt/rippled-reporting/etc +debian/tmp/opt/rippled-reporting/etc/validators.txt opt/rippled-reporting/etc + +opt/rippled-reporting/bin/update-rippled-reporting.sh +opt/rippled-reporting/bin/getRippledReportingInfo +opt/rippled-reporting/etc/update-rippled-reporting-cron +etc/logrotate.d/rippled-reporting \ No newline at end of file diff --git a/Builds/containers/packaging/dpkg/debian/rippled-reporting.links b/Builds/containers/packaging/dpkg/debian/rippled-reporting.links new file mode 100644 index 00000000000..ab83b0c816d --- /dev/null +++ b/Builds/containers/packaging/dpkg/debian/rippled-reporting.links @@ -0,0 +1,3 @@ +opt/rippled-reporting/etc/rippled-reporting.cfg etc/opt/rippled-reporting/rippled-reporting.cfg +opt/rippled-reporting/etc/validators.txt etc/opt/rippled-reporting/validators.txt +opt/rippled-reporting/bin/rippled-reporting usr/local/bin/rippled-reporting diff --git a/Builds/containers/packaging/dpkg/debian/rippled-reporting.postinst b/Builds/containers/packaging/dpkg/debian/rippled-reporting.postinst new file mode 100644 index 00000000000..64044197344 --- /dev/null +++ b/Builds/containers/packaging/dpkg/debian/rippled-reporting.postinst @@ -0,0 +1,33 @@ +#!/bin/sh +set -e + +USER_NAME=rippled-reporting +GROUP_NAME=rippled-reporting +case "$1" in + configure) + id -u $USER_NAME >/dev/null 2>&1 || \ + adduser --system --quiet \ + --home /nonexistent --no-create-home \ + --disabled-password \ + --group "$GROUP_NAME" + chown -R $USER_NAME:$GROUP_NAME /var/log/rippled-reporting/ + chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled-reporting/ + chmod 755 
/var/log/rippled-reporting/ + chmod 755 /var/lib/rippled-reporting/ + chown -R $USER_NAME:$GROUP_NAME /opt/rippled-reporting + + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + + +#DEBHELPER# + +exit 0 diff --git a/Builds/containers/packaging/dpkg/debian/conffiles b/Builds/containers/packaging/dpkg/debian/rippled.conffiles similarity index 70% rename from Builds/containers/packaging/dpkg/debian/conffiles rename to Builds/containers/packaging/dpkg/debian/rippled.conffiles index 4facf4a3417..0c6d1c36d42 100644 --- a/Builds/containers/packaging/dpkg/debian/conffiles +++ b/Builds/containers/packaging/dpkg/debian/rippled.conffiles @@ -1,3 +1,2 @@ /opt/ripple/etc/rippled.cfg /opt/ripple/etc/validators.txt -/etc/logrotate.d/rippled diff --git a/Builds/containers/packaging/dpkg/debian/rippled.install b/Builds/containers/packaging/dpkg/debian/rippled.install index 3ce9f60fb3a..ac9d946294e 100644 --- a/Builds/containers/packaging/dpkg/debian/rippled.install +++ b/Builds/containers/packaging/dpkg/debian/rippled.install @@ -5,4 +5,4 @@ opt/ripple/bin/getRippledInfo opt/ripple/etc/rippled.cfg opt/ripple/etc/validators.txt opt/ripple/etc/update-rippled-cron -etc/logrotate.d/rippled +etc/logrotate.d/rippled \ No newline at end of file diff --git a/Builds/containers/packaging/dpkg/debian/rules b/Builds/containers/packaging/dpkg/debian/rules index bdf3c9a33dd..f7c8123bd1c 100755 --- a/Builds/containers/packaging/dpkg/debian/rules +++ b/Builds/containers/packaging/dpkg/debian/rules @@ -16,28 +16,46 @@ override_dh_systemd_start: override_dh_auto_configure: env - rm -rf bld - mkdir -p bld - cd bld && \ - cmake .. -G Ninja \ + rm -rf bld && mkdir -p bld/rippled + cd bld/rippled && \ + cmake ../.. -G Ninja \ -DCMAKE_INSTALL_PREFIX=/opt/ripple \ -DCMAKE_BUILD_TYPE=Release \ -Dstatic=ON \ -Dunity=OFF \ -Dvalidator_keys=ON \ + -Dunity=OFF \ -DCMAKE_VERBOSE_MAKEFILE=OFF + + cmake -S . 
\ + -B bld/rippled-reporting \ + -G Ninja \ + -DCMAKE_INSTALL_PREFIX=/opt/rippled-reporting \ + -DCMAKE_BUILD_TYPE=Release \ + -Dstatic=ON \ + -Dunity=OFF \ + -DCMAKE_VERBOSE_MAKEFILE=OFF \ + -Dreporting=ON + override_dh_auto_build: - cd bld && \ - cmake --build . --target rippled --target validator-keys --parallel + cmake --build bld/rippled --target rippled --target validator-keys --parallel + cmake --build bld/rippled-reporting --target rippled --parallel override_dh_auto_install: - cd bld && DESTDIR=../debian/tmp cmake --build . --target install - install -D bld/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys + cmake --install bld/rippled --prefix debian/tmp/opt/ripple + install -D bld/rippled/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys install -D Builds/containers/shared/update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh install -D bin/getRippledInfo debian/tmp/opt/ripple/bin/getRippledInfo install -D Builds/containers/shared/update-rippled-cron debian/tmp/opt/ripple/etc/update-rippled-cron install -D Builds/containers/shared/rippled-logrotate debian/tmp/etc/logrotate.d/rippled rm -rf debian/tmp/opt/ripple/lib64/cmake/date - rm -rf bld - rm -rf bld_vl + + mkdir -p debian/tmp/opt/rippled-reporting/etc + cp cfg/validators-example.txt debian/tmp/opt/rippled-reporting/etc/validators.txt + install -D bld/rippled/validator-keys/validator-keys debian/tmp/opt/rippled-reporting/bin/validator-keys + + sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled.sh > debian/tmp/opt/rippled-reporting/bin/update-rippled-reporting.sh + sed -E 's/rippled?/rippled-reporting/g' bin/getRippledInfo > debian/tmp/opt/rippled-reporting/bin/getRippledReportingInfo + sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled-cron > debian/tmp/opt/rippled-reporting/etc/update-rippled-reporting-cron + sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/rippled-logrotate > 
debian/tmp/etc/logrotate.d/rippled-reporting \ No newline at end of file diff --git a/Builds/containers/packaging/rpm/50-rippled-reporting.preset b/Builds/containers/packaging/rpm/50-rippled-reporting.preset new file mode 100644 index 00000000000..50d16dd7acd --- /dev/null +++ b/Builds/containers/packaging/rpm/50-rippled-reporting.preset @@ -0,0 +1 @@ +enable rippled-reporting.service \ No newline at end of file diff --git a/Builds/containers/packaging/rpm/build_rpm.sh b/Builds/containers/packaging/rpm/build_rpm.sh index 7bf79071d0f..f0141e9ff53 100755 --- a/Builds/containers/packaging/rpm/build_rpm.sh +++ b/Builds/containers/packaging/rpm/build_rpm.sh @@ -30,8 +30,8 @@ fi cd /opt/rippled_bld/pkg/rippled if [[ -n $(git status --porcelain) ]]; then - git status - error "Unstaged changes in this repo - please commit first" + git status + error "Unstaged changes in this repo - please commit first" fi git archive --format tar.gz --prefix rippled/ -o ../rpmbuild/SOURCES/rippled.tar.gz HEAD # TODO include validator-keys sources @@ -54,18 +54,22 @@ cp ./rpmbuild/SRPMS/* ${PKG_OUTDIR} RPM_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm 2>/dev/null) DBG_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-debuginfo*.rpm 2>/dev/null) DEV_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-devel*.rpm 2>/dev/null) +REP_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-reporting*.rpm 2>/dev/null) SRC_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/SRPMS/*.rpm 2>/dev/null) RPM_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm | awk '{ print $1}')" DBG_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-debuginfo*.rpm | awk '{ print $1}')" +REP_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-reporting*.rpm | awk '{ print $1}')" DEV_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-devel*.rpm | awk '{ print $1}')" SRC_SHA256="$(sha256sum 
./rpmbuild/SRPMS/*.rpm | awk '{ print $1}')" echo "rpm_md5sum=$RPM_MD5SUM" > ${PKG_OUTDIR}/build_vars +echo "rep_md5sum=$REP_MD5SUM" >> ${PKG_OUTDIR}/build_vars echo "dbg_md5sum=$DBG_MD5SUM" >> ${PKG_OUTDIR}/build_vars echo "dev_md5sum=$DEV_MD5SUM" >> ${PKG_OUTDIR}/build_vars echo "src_md5sum=$SRC_MD5SUM" >> ${PKG_OUTDIR}/build_vars echo "rpm_sha256=$RPM_SHA256" >> ${PKG_OUTDIR}/build_vars +echo "rep_sha256=$REP_SHA256" >> ${PKG_OUTDIR}/build_vars echo "dbg_sha256=$DBG_SHA256" >> ${PKG_OUTDIR}/build_vars echo "dev_sha256=$DEV_SHA256" >> ${PKG_OUTDIR}/build_vars echo "src_sha256=$SRC_SHA256" >> ${PKG_OUTDIR}/build_vars @@ -73,4 +77,3 @@ echo "rippled_version=$RIPPLED_VERSION" >> ${PKG_OUTDIR}/build_vars echo "rpm_version=$RIPPLED_RPM_VERSION" >> ${PKG_OUTDIR}/build_vars echo "rpm_file_name=$tar_file" >> ${PKG_OUTDIR}/build_vars echo "rpm_version_release=$RPM_VERSION_RELEASE" >> ${PKG_OUTDIR}/build_vars - diff --git a/Builds/containers/packaging/rpm/rippled.spec b/Builds/containers/packaging/rpm/rippled.spec index 3b1ede3d48d..1ad2e278bf8 100644 --- a/Builds/containers/packaging/rpm/rippled.spec +++ b/Builds/containers/packaging/rpm/rippled.spec @@ -2,6 +2,7 @@ %define rpm_release %(echo $RPM_RELEASE) %define rpm_patch %(echo $RPM_PATCH) %define _prefix /opt/ripple + Name: rippled # Dashes in Version extensions must be converted to underscores Version: %{rippled_version} @@ -25,29 +26,41 @@ Requires: zlib-static %description devel core library for development of standalone applications that sign transactions. +%package reporting +Summary: Reporting Server for rippled + +%description reporting +History server for XRP Ledger + %prep %setup -c -n rippled %build cd rippled -mkdir -p bld.release -cd bld.release -cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dstatic=true -Dunity=OFF -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON -cmake --build . 
--parallel --target rippled --target validator-keys +mkdir -p bld.rippled +pushd bld.rippled +cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON +cmake --build . --parallel $(nproc) --target rippled --target validator-keys +popd + +mkdir -p bld.rippled-reporting +cd bld.rippled-reporting +cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix}-reporting -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dreporting=ON +cmake --build . --parallel $(nproc) --target rippled %pre test -e /etc/pki/tls || { mkdir -p /etc/pki; ln -s /usr/lib/ssl /etc/pki/tls; } %install rm -rf $RPM_BUILD_ROOT -DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.release --target install +DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.rippled --target install -- -v rm -rf ${RPM_BUILD_ROOT}/%{_prefix}/lib64/cmake/date install -d ${RPM_BUILD_ROOT}/etc/opt/ripple install -d ${RPM_BUILD_ROOT}/usr/local/bin ln -s %{_prefix}/etc/rippled.cfg ${RPM_BUILD_ROOT}/etc/opt/ripple/rippled.cfg ln -s %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/ripple/validators.txt ln -s %{_prefix}/bin/rippled ${RPM_BUILD_ROOT}/usr/local/bin/rippled -install -D rippled/bld.release/validator-keys/validator-keys ${RPM_BUILD_ROOT}%{_bindir}/validator-keys +install -D rippled/bld.rippled/validator-keys/validator-keys ${RPM_BUILD_ROOT}%{_bindir}/validator-keys install -D ./rippled/Builds/containers/shared/rippled.service ${RPM_BUILD_ROOT}/usr/lib/systemd/system/rippled.service install -D ./rippled/Builds/containers/packaging/rpm/50-rippled.preset ${RPM_BUILD_ROOT}/usr/lib/systemd/system-preset/50-rippled.preset install -D ./rippled/Builds/containers/shared/update-rippled.sh ${RPM_BUILD_ROOT}%{_bindir}/update-rippled.sh @@ -57,7 +70,27 @@ install -D ./rippled/Builds/containers/shared/rippled-logrotate ${RPM_BUILD_ROOT install -d $RPM_BUILD_ROOT/var/log/rippled install -d 
$RPM_BUILD_ROOT/var/lib/rippled +# reporting mode +%define _prefix /opt/rippled-reporting +mkdir -p ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/ +install -D rippled/bld.rippled-reporting/rippled-reporting ${RPM_BUILD_ROOT}%{_bindir}/rippled-reporting +install -D ./rippled/cfg/rippled-reporting.cfg ${RPM_BUILD_ROOT}%{_prefix}/etc/rippled-reporting.cfg +install -D ./rippled/cfg/validators-example.txt ${RPM_BUILD_ROOT}%{_prefix}/etc/validators.txt +install -D ./rippled/Builds/containers/packaging/rpm/50-rippled-reporting.preset ${RPM_BUILD_ROOT}/usr/lib/systemd/system-preset/50-rippled-reporting.preset +ln -s %{_prefix}/bin/rippled-reporting ${RPM_BUILD_ROOT}/usr/local/bin/rippled-reporting +ln -s %{_prefix}/etc/rippled-reporting.cfg ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/rippled-reporting.cfg +ln -s %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/validators.txt +install -d $RPM_BUILD_ROOT/var/log/rippled-reporting +install -d $RPM_BUILD_ROOT/var/lib/rippled-reporting +install -D ./rippled/Builds/containers/shared/rippled-reporting.service ${RPM_BUILD_ROOT}/usr/lib/systemd/system/rippled-reporting.service +sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/update-rippled.sh > ${RPM_BUILD_ROOT}%{_bindir}/update-rippled-reporting.sh +sed -E 's/rippled?/rippled-reporting/g' ./rippled/bin/getRippledInfo > ${RPM_BUILD_ROOT}%{_bindir}/getRippledReportingInfo +sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/update-rippled-cron > ${RPM_BUILD_ROOT}%{_prefix}/etc/update-rippled-reporting-cron +sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/rippled-logrotate > ${RPM_BUILD_ROOT}/etc/logrotate.d/rippled-reporting + + %post +%define _prefix /opt/ripple USER_NAME=rippled GROUP_NAME=rippled @@ -75,7 +108,25 @@ chmod 644 %{_prefix}/etc/update-rippled-cron chmod 644 /etc/logrotate.d/rippled chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron +%post reporting +%define 
_prefix /opt/rippled-reporting +USER_NAME=rippled-reporting +GROUP_NAME=rippled-reporting + +getent passwd $USER_NAME &>/dev/null || useradd -r $USER_NAME +getent group $GROUP_NAME &>/dev/null || groupadd $GROUP_NAME + +chown -R $USER_NAME:$GROUP_NAME /var/log/rippled-reporting/ +chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled-reporting/ +chown -R $USER_NAME:$GROUP_NAME %{_prefix}/ + +chmod 755 /var/log/rippled-reporting/ +chmod 755 /var/lib/rippled-reporting/ +chmod -x /usr/lib/systemd/system/rippled-reporting.service + + %files +%define _prefix /opt/ripple %doc rippled/README.md rippled/LICENSE.md %{_bindir}/rippled /usr/local/bin/rippled @@ -98,6 +149,25 @@ chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron %{_prefix}/lib/*.a %{_prefix}/lib/cmake/ripple +%files reporting +%define _prefix /opt/rippled-reporting +%doc rippled/README.md rippled/LICENSE.md + +%{_bindir}/rippled-reporting +/usr/local/bin/rippled-reporting +%config(noreplace) /etc/opt/rippled-reporting/rippled-reporting.cfg +%config(noreplace) %{_prefix}/etc/rippled-reporting.cfg +%config(noreplace) %{_prefix}/etc/validators.txt +%config(noreplace) /etc/opt/rippled-reporting/validators.txt +%config(noreplace) /usr/lib/systemd/system/rippled-reporting.service +%config(noreplace) /usr/lib/systemd/system-preset/50-rippled-reporting.preset +%dir /var/log/rippled-reporting/ +%dir /var/lib/rippled-reporting/ +%{_bindir}/update-rippled-reporting.sh +%{_bindir}/getRippledReportingInfo +%{_prefix}/etc/update-rippled-reporting-cron +%config(noreplace) /etc/logrotate.d/rippled-reporting + %changelog * Wed Aug 28 2019 Mike Ellery - Switch to subproject build for validator-keys diff --git a/Builds/containers/shared/build_deps.sh b/Builds/containers/shared/build_deps.sh index dc91e99bd64..8e11d01508f 100755 --- a/Builds/containers/shared/build_deps.sh +++ b/Builds/containers/shared/build_deps.sh @@ -30,7 +30,7 @@ cd openssl-${OPENSSL_VER} SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") 
./config -fPIC --prefix=/opt/local/openssl --openssldir=${SSLDIR} zlib shared make -j$(nproc) >> make_output.txt 2>&1 -make install +make install >> make_output.txt 2>&1 cd .. rm -f openssl-${OPENSSL_VER}.tar.gz rm -rf openssl-${OPENSSL_VER} @@ -43,7 +43,7 @@ cd libarchive-3.4.1 mkdir _bld && cd _bld cmake -DCMAKE_BUILD_TYPE=Release .. make -j$(nproc) >> make_output.txt 2>&1 -make install +make install >> make_output.txt 2>&1 cd ../.. rm -f libarchive-3.4.1.tar.gz rm -rf libarchive-3.4.1 @@ -55,7 +55,7 @@ cd protobuf-3.10.1 ./autogen.sh ./configure make -j$(nproc) >> make_output.txt 2>&1 -make install +make install >> make_output.txt 2>&1 ldconfig cd .. rm -f protobuf-all-3.10.1.tar.gz @@ -78,7 +78,7 @@ cmake \ -DCARES_BUILD_CONTAINER_TESTS=OFF \ .. make -j$(nproc) >> make_output.txt 2>&1 -make install +make install >> make_output.txt 2>&1 cd ../.. rm -f c-ares-1.15.0.tar.gz rm -rf c-ares-1.15.0 @@ -98,7 +98,7 @@ cmake \ -DProtobuf_USE_STATIC_LIBS=ON \ .. make -j$(nproc) >> make_output.txt 2>&1 -make install +make install >> make_output.txt 2>&1 cd ../.. rm -f xf v1.25.0.tar.gz rm -rf grpc-1.25.0 @@ -115,7 +115,7 @@ if [ "${CI_USE}" = true ] ; then cd build cmake -G "Unix Makefiles" .. make -j$(nproc) >> make_output.txt 2>&1 - make install + make install >> make_output.txt 2>&1 cd ../.. rm -f Release_1_8_16.tar.gz rm -rf doxygen-Release_1_8_16 @@ -136,8 +136,8 @@ if [ "${CI_USE}" = true ] ; then tar xf ccache-3.7.6.tar.gz cd ccache-3.7.6 ./configure --prefix=/usr/local - make - make install + make >> make_output.txt 2>&1 + make install >> make_output.txt 2>&1 cd .. 
rm -f ccache-3.7.6.tar.gz rm -rf ccache-3.7.6 diff --git a/Builds/containers/shared/rippled-reporting.service b/Builds/containers/shared/rippled-reporting.service new file mode 100644 index 00000000000..69edf4794ab --- /dev/null +++ b/Builds/containers/shared/rippled-reporting.service @@ -0,0 +1,15 @@ +[Unit] +Description=Ripple Reporting Daemon +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/opt/rippled-reporting/bin/rippled-reporting --silent --conf /etc/opt/rippled-reporting/rippled-reporting.cfg +Restart=on-failure +User=rippled-reporting +Group=rippled-reporting +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/CMakeLists.txt b/CMakeLists.txt index ade87d3498e..c5e750d7b41 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,12 +25,7 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps") include (CheckCXXCompilerFlag) -if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.11) - include (FetchContent) -endif () -if (MSVC AND CMAKE_VERSION VERSION_LESS 3.12) - message (FATAL_ERROR "MSVC requires cmake 3.12 or greater for proper boost support") -endif () +include (FetchContent) include (ExternalProject) include (CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP include (ProcessorCount) diff --git a/cfg/rippled-reporting.cfg b/cfg/rippled-reporting.cfg new file mode 100644 index 00000000000..7e69d76f4f5 --- /dev/null +++ b/cfg/rippled-reporting.cfg @@ -0,0 +1,1703 @@ +#------------------------------------------------------------------------------- +# +# +#------------------------------------------------------------------------------- +# +# Contents +# +# 1. Server +# +# 2. Peer Protocol +# +# 3. Ripple Protocol +# +# 4. HTTPS Client +# +# 5. Reporting Mode +# +# 6. Database +# +# 7. Diagnostics +# +# 8. Voting +# +# 9. Misc Settings +# +# 10. 
Example Settings +# +#------------------------------------------------------------------------------- +# +# Purpose +# +# This file documents and provides examples of all rippled server process +# configuration options. When the rippled server instance is launched, it +# looks for a file with the following name: +# +# rippled.cfg +# +# For more information on where the rippled server instance searches for the +# file, visit: +# +# https://xrpl.org/commandline-usage.html#generic-options +# +# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX, +# or Mac style end of lines. Blank lines and lines beginning with '#' are +# ignored. Undefined sections are reserved. No escapes are currently defined. +# +# Notation +# +# In this document a simple BNF notation is used. Angle brackets denote +# required elements, square brackets denote optional elements, and single +# quotes indicate string literals. A vertical bar separating 1 or more +# elements is a logical "or"; any one of the elements may be chosen. +# Parentheses are notational only, and used to group elements; they are not +# part of the syntax unless they appear in quotes. White space may always +# appear between elements, it has no effect on values. +# +# A required identifier +# '=' The equals sign character +# | Logical "or" +# ( ) Used for grouping +# +# +# An identifier is a string of upper or lower case letters, digits, or +# underscores subject to the requirement that the first character of an +# identifier must be a letter. Identifiers are not case sensitive (but +# values may be). +# +# Some configuration sections contain key/value pairs. A line containing +# a key/value pair has this syntax: +# +# '=' +# +# Depending on the section and key, different value types are possible: +# +# A signed integer +# An unsigned integer +# A boolean. 1 = true/yes/on, 0 = false/no/off. +# +# Consult the documentation on the key in question to determine the possible +# value types. 
+# +# +# +#------------------------------------------------------------------------------- +# +# 1. Server +# +#---------- +# +# +# +# rippled offers various server protocols to clients making inbound +# connections. The listening ports rippled uses are "universal" ports +# which may be configured to handshake in one or more of the available +# supported protocols. These universal ports simplify administration: +# A single open port can be used for multiple protocols. +# +# NOTE At least one server port must be defined in order +# to accept incoming network connections. +# +# +# [server] +# +# A list of port names and key/value pairs. A port name must start with a +# letter and contain only letters and numbers. The name is not case-sensitive. +# For each name in this list, rippled will look for a configuration file +# section with the same name and use it to create a listening port. The +# name is informational only; the choice of name does not affect the function +# of the listening port. +# +# Key/value pairs specified in this section are optional, and apply to all +# listening ports unless the port overrides the value in its section. They +# may be considered default values. +# +# Suggestion: +# +# To avoid a conflict with port names and future configuration sections, +# we recommend prepending "port_" to the port name. This prefix is not +# required, but suggested. +# +# This example defines two ports with different port numbers and settings: +# +# [server] +# port_public +# port_private +# port = 80 +# +# [port_public] +# ip = 0.0.0.0 +# port = 443 +# protocol = peer,https +# +# [port_private] +# ip = 127.0.0.1 +# protocol = http +# +# When rippled is used as a command line client (for example, issuing a +# server stop command), the first port advertising the http or https +# protocol will be used to make the connection. +# +# +# +# [] +# +# A series of key/value pairs that define the settings for the port with +# the corresponding name. 
These keys are possible: +# +# ip = +# +# Required. Determines the IP address of the network interface to bind +# to. To bind to all available IPv4 interfaces, use 0.0.0.0 +# To bind to all IPv4 and IPv6 interfaces, use :: +# +# NOTE if the ip value is ::, then any incoming IPv4 connections will +# be made as mapped IPv4 addresses. +# +# port = +# +# Required. Sets the port number to use for this port. +# +# protocol = [ http, https, peer ] +# +# Required. A comma-separated list of protocols to support: +# +# http JSON-RPC over HTTP +# https JSON-RPC over HTTPS +# ws Websockets +# wss Secure Websockets +# peer Peer Protocol +# +# Restrictions: +# +# Only one port may be configured to support the peer protocol. +# A port cannot have websocket and non websocket protocols at the +# same time. It is possible to have both Websockets and Secure Websockets +# together in one port. +# +# NOTE If no ports support the peer protocol, rippled cannot +# receive incoming peer connections or become a superpeer. +# +# limit = +# +# Optional. An integer value that will limit the number of connected +# clients that the port will accept. Once the limit is reached, new +# connections will be refused until other clients disconnect. +# Omit or set to 0 to allow unlimited numbers of clients. +# +# user = +# password = +# +# When set, these credentials will be required on HTTP/S requests. +# The credentials must be provided using HTTP's Basic Authentication +# headers. If either or both fields are empty, then no credentials are +# required. IP address restrictions, if any, will be checked in addition +# to the credentials specified here. +# +# When acting in the client role, rippled will supply these credentials +# using HTTP's Basic Authentication headers when making outbound HTTP/S +# requests. +# +# admin = [ IP, IP, IP, ... ] +# +# A comma-separated list of IP addresses. +# +# When set, grants administrative command access to the specified IP +# addresses. 
These commands may be issued over http, https, ws, or wss +# if configured on the port. If not provided, the default is to not allow +# administrative commands. +# +# NOTE A common configuration value for the admin field is "localhost". +# If you are listening on all IPv4/IPv6 addresses by specifying +# ip = :: then you can use admin = ::ffff:127.0.0.1,::1 to allow +# administrative access from both IPv4 and IPv6 localhost +# connections. +# +# *SECURITY WARNING* +# 0.0.0.0 or :: may be used to allow access from any IP address. It must +# be the only address specified and cannot be combined with other IPs. +# Use of this address can compromise server security, please consider its +# use carefully. +# +# admin_user = +# admin_password = +# +# When set, clients must provide these credentials in the submitted +# JSON for any administrative command requests submitted to the HTTP/S, +# WS, or WSS protocol interfaces. If administrative commands are +# disabled for a port, these credentials have no effect. +# +# When acting in the client role, rippled will supply these credentials +# in the submitted JSON for any administrative command requests when +# invoking JSON-RPC commands on remote servers. +# +# secure_gateway = [ IP, IP, IP, ... ] +# +# A comma-separated list of IP addresses. +# +# When set, allows the specified IP addresses to pass HTTP headers +# containing username and remote IP address for each session. If a +# non-empty username is passed in this way, then resource controls +# such as those often resulting in "tooBusy" errors will be lifted. However, +# administrative RPC commands such as "stop" will not be allowed. +# The HTTP headers that secure_gateway hosts can set are X-User and +# X-Forwarded-For. Only the X-User header affects resource controls. +# However, both header values are logged to help identify user activity. +# If no X-User header is passed, or if its value is empty, then +# resource controls will default to those for non-administrative users. 
+# +# The secure_gateway IP addresses are intended to represent +# proxies. Since rippled trusts these hosts, they must be +# responsible for properly authenticating the remote user. +# +# The same IP address cannot be used in both "admin" and "secure_gateway" +# lists for the same port. In this case, rippled will abort with an error +# message to the console shortly after startup +# +# ssl_key = +# ssl_cert = +# ssl_chain = +# +# Use the specified files when configuring SSL on the port. +# +# NOTE If no files are specified and secure protocols are selected, +# rippled will generate an internal self-signed certificate. +# +# The files have these meanings: +# +# ssl_key +# +# Specifies the filename holding the SSL key in PEM format. +# +# ssl_cert +# +# Specifies the path to the SSL certificate file in PEM format. +# This is not needed if the chain includes it. +# +# ssl_chain +# +# If you need a certificate chain, specify the path to the +# certificate chain here. The chain may include the end certificate. +# +# ssl_ciphers = +# +# Control the ciphers which the server will support over SSL on the port, +# specified using the OpenSSL "cipher list format". +# +# NOTE If unspecified, rippled will automatically configure a modern +# cipher suite. This default suite should be widely supported. +# +# You should not modify this string unless you have a specific +# reason and cryptographic expertise. Incorrect modification may +# keep rippled from connecting to other instances of rippled or +# prevent RPC and WebSocket clients from connecting. +# +# send_queue_limit = [1..65535] +# +# A Websocket will disconnect when its send queue exceeds this limit. +# The default is 100. A larger value may help with erratic disconnects but +# may adversely affect server performance. +# +# WebSocket permessage-deflate extension options +# +# These settings configure the optional permessage-deflate extension +# options and may appear on any port configuration entry. 
They are meaningful +# only to ports which have enabled a WebSocket protocol. +# +# permessage_deflate = +# +# Determines if permessage_deflate extension negotiations are enabled. +# When enabled, clients may request the extension and the server will +# offer the enabled extension in response. +# +# client_max_window_bits = [9..15] +# server_max_window_bits = [9..15] +# client_no_context_takeover = +# server_no_context_takeover = +# +# These optional settings control options related to the permessage-deflate +# extension negotiation. For precise definitions of these fields please see +# the RFC 7692, "Compression Extensions for WebSocket": +# https://tools.ietf.org/html/rfc7692 +# +# compress_level = [0..9] +# +# When set, determines the amount of compression attempted, where 0 is +# the least amount and 9 is the most amount. Higher levels require more +# CPU resources. Levels 1 through 3 use a fast compression algorithm, +# while levels 4 through 9 use a more compact algorithm which uses more +# CPU resources. If unspecified, a default of 3 is used. +# +# memory_level = [1..9] +# +# When set, determines the relative amount of memory used to hold +# intermediate compression data. Higher numbers can give better compression +# ratios at the cost of higher memory and CPU resources. +# +# [rpc_startup] +# +# Specify a list of RPC commands to run at startup. +# +# Examples: +# { "command" : "server_info" } +# { "command" : "log_level", "partition" : "ripplecalc", "severity" : "trace" } +# +# +# +# [websocket_ping_frequency] +# +# +# +# The amount of time to wait in seconds, before sending a websocket 'ping' +# message. Ping messages are used to determine if the remote end of the +# connection is no longer available. +# +# +# [server_domain] +# +# domain name +# +# The domain under which a TOML file applicable to this server can be +# found. A server may lie about its domain so the TOML should contain +# a reference to this server by pubkey in the [nodes] array. 
+# +# +#------------------------------------------------------------------------------- +# +# 2. Peer Protocol +# +#----------------- +# +# These settings control security and access attributes of the Peer to Peer +# server section of the rippled process. Peer Protocol implements the +# Ripple Payment protocol. It is over peer connections that transactions +# and validations are passed from machine to machine, to determine the +# contents of validated ledgers. +# +# +# +# [ips] +# +# List of hostnames or ips where the Ripple protocol is served. A default +# starter list is included in the code and used if no other hostnames are +# available. +# +# One address or domain name per line is allowed. A port must be +# specified after adding a space to the address. The ordering of entries +# does not generally matter. +# +# The default list of entries is: +# - r.ripple.com 51235 +# - zaphod.alloy.ee 51235 +# - sahyadri.isrdc.in 51235 +# +# Examples: +# +# [ips] +# 192.168.0.1 +# 192.168.0.1 2459 +# r.ripple.com 51235 +# +# +# [ips_fixed] +# +# List of IP addresses or hostnames to which rippled should always attempt to +# maintain peer connections. This is useful for manually forming private +# networks, for example to configure a validation server that connects to the +# Ripple network through a public-facing server, or for building a set +# of cluster peers. +# +# One address or domain name per line is allowed. A port must be specified +# after adding a space to the address. +# +# +# +# [peer_private] +# +# 0 or 1. +# +# 0: Request peers to broadcast your address. Normal outbound peer connections [default] +# 1: Request peers not to broadcast your address. Only connect to configured peers. +# +# +# +# [peers_max] +# +# The largest number of desired peer connections (incoming or outgoing). +# Cluster and fixed peers do not count towards this total. There are +# implementation-defined lower limits imposed on this value for security +# purposes. 
+# +# +# +# [node_seed] +# +# This is used for clustering. To force a particular node seed or key, the +# key can be set here. The format is the same as the validation_seed field. +# To obtain a validation seed, use the validation_create command. +# +# Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE +# shfArahZT9Q9ckTf3s1psJ7C7qzVN +# +# +# +# [cluster_nodes] +# +# To extend full trust to other nodes, place their node public keys here. +# Generally, you should only do this for nodes under common administration. +# Node public keys start with an 'n'. To give a node a name for identification +# place a space after the public key and then the name. +# +# +# +# [sntp_servers] +# +# IP address or domain of NTP servers to use for time synchronization. +# +# These NTP servers are suitable for rippled servers located in the United +# States: +# time.windows.com +# time.apple.com +# time.nist.gov +# pool.ntp.org +# +# +# +# [max_transactions] +# +# Configure the maximum number of transactions to have in the job queue +# +# Must be a number between 100 and 1000, defaults to 250 +# +# +# [overlay] +# +# Controls settings related to the peer to peer overlay. +# +# A set of key/value pair parameters to configure the overlay. +# +# public_ip = +# +# If the server has a known, fixed public IPv4 address, +# specify that IP address here in dotted decimal notation. +# Peers will use this information to reject attempt to proxy +# connections to or from this server. +# +# ip_limit = +# +# The maximum number of incoming peer connections allowed by a single +# IP that isn't classified as "private" in RFC1918. The implementation +# imposes some hard and soft upper limits on this value to prevent a +# single host from consuming all inbound slots. If the value is not +# present the server will autoconfigure an appropriate limit. 
+# +# max_unknown_time = +# +# The maximum amount of time, in seconds, that an outbound connection +# is allowed to stay in the "unknown" tracking state. This option can +# take any value between 300 and 1800 seconds, inclusive. If the option +# is not present the server will autoconfigure an appropriate limit. +# +# The current default (which is subject to change) is 600 seconds. +# +# max_diverged_time = +# +# The maximum amount of time, in seconds, that an outbound connection +# is allowed to stay in the "diverged" tracking state. The option can +# take any value between 60 and 900 seconds, inclusive. If the option +# is not present the server will autoconfigure an appropriate limit. +# +# The current default (which is subject to change) is 300 seconds. +# +# +# [transaction_queue] EXPERIMENTAL +# +# This section is EXPERIMENTAL, and should not be +# present for production configuration settings. +# +# A set of key/value pair parameters to tune the performance of the +# transaction queue. +# +# ledgers_in_queue = +# +# The queue will be limited to this of average ledgers' +# worth of transactions. If the queue fills up, the transactions +# with the lowest fee levels will be dropped from the queue any +# time a transaction with a higher fee level is added. +# Default: 20. +# +# minimum_queue_size = +# +# The queue will always be able to hold at least this of +# transactions, regardless of recent ledger sizes or the value of +# ledgers_in_queue. Default: 2000. +# +# retry_sequence_percent = +# +# If a client replaces a transaction in the queue (same sequence +# number as a transaction already in the queue), the new +# transaction's fee must be more than percent higher +# than the original transaction's fee, or meet the current open +# ledger fee to be considered. Default: 25. 
+# +# minimum_escalation_multiplier = +# +# At ledger close time, the median fee level of the transactions +# in that ledger is used as a multiplier in escalation +# calculations of the next ledger. This minimum value ensures that +# the escalation is significant. Default: 500. +# +# minimum_txn_in_ledger = +# +# Minimum number of transactions that must be allowed into the +# ledger at the minimum required fee before the required fee +# escalates. Default: 5. +# +# minimum_txn_in_ledger_standalone = +# +# Like minimum_txn_in_ledger when rippled is running in standalone +# mode. Default: 1000. +# +# target_txn_in_ledger = +# +# Number of transactions allowed into the ledger at the minimum +# required fee that the queue will "work toward" as long as +# consensus stays healthy. The limit will grow quickly until it +# reaches or exceeds this number. After that the limit may still +# change, but will stay above the target. If consensus is not +# healthy, the limit will be clamped to this value or lower. +# Default: 50. +# +# maximum_txn_in_ledger = +# +# (Optional) Maximum number of transactions that will be allowed +# into the ledger at the minimum required fee before the required +# fee escalates. Default: no maximum. +# +# normal_consensus_increase_percent = +# +# (Optional) When the ledger has more transactions than "expected", +# and performance is humming along nicely, the expected ledger size +# is updated to the previous ledger size plus this percentage. +# Default: 20 +# +# slow_consensus_decrease_percent = +# +# (Optional) When consensus takes longer than appropriate, the +# expected ledger size is updated to the minimum of the previous +# ledger size or the "expected" ledger size minus this percentage. +# Default: 50 +# +# maximum_txn_per_account = +# +# Maximum number of transactions that one account can have in the +# queue at any given time. Default: 10. 
+# +# minimum_last_ledger_buffer = +# +# If a transaction has a LastLedgerSequence, it must be at least +# this much larger than the current open ledger sequence number. +# Default: 2. +# +# zero_basefee_transaction_feelevel = +# +# So we don't deal with infinite fee levels, treat any transaction +# with a 0 base fee (ie. SetRegularKey password recovery) as +# having this fee level. +# Default: 256000. +# +# +#------------------------------------------------------------------------------- +# +# 3. Protocol +# +#------------------- +# +# These settings affect the behavior of the server instance with respect +# to protocol level activities such as validating and closing ledgers +# adjusting fees in response to server overloads. +# +# +# +# +# [relay_proposals] +# +# Controls the relaying behavior for proposals received by this server that +# are issued by validators that are not on the server's UNL. +# +# Legal values are: "trusted" and "all". The default is "trusted". +# +# +# [relay_validations] +# +# Controls the relaying behavior for validations received by this server that +# are issued by validators that are not on the server's UNL. +# +# Legal values are: "trusted" and "all". The default is "all". +# +# +# +# +# +# [ledger_history] +# +# The number of past ledgers to acquire on server startup and the minimum to +# maintain while running. +# +# To serve clients, servers need historical ledger data. Servers that don't +# need to serve clients can set this to "none". Servers that want complete +# history can set this to "full". +# +# This must be less than or equal to online_delete (if online_delete is used) +# +# The default is: 256 +# +# +# +# [fetch_depth] +# +# The number of past ledgers to serve to other peers that request historical +# ledger data (or "full" for no limit). +# +# Servers that require low latency and high local performance may wish to +# restrict the historical ledgers they are willing to serve. 
Setting this +# below 32 can harm network stability as servers require easy access to +# recent history to stay in sync. Values below 128 are not recommended. +# +# The default is: full +# +# +# +# [validation_seed] +# +# To perform validation, this section should contain either a validation seed +# or key. The validation seed is used to generate the validation +# public/private key pair. To obtain a validation seed, use the +# validation_create command. +# +# Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE +# shfArahZT9Q9ckTf3s1psJ7C7qzVN +# +# +# +# [validator_token] +# +# This is an alternative to [validation_seed] that allows rippled to perform +# validation without having to store the validator keys on the network +# connected server. The field should contain a single token in the form of a +# base64-encoded blob. +# An external tool is available for generating validator keys and tokens. +# +# +# +# [validator_key_revocation] +# +# If a validator's secret key has been compromised, a revocation must be +# generated and added to this field. The revocation notifies peers that it is +# no longer safe to trust the revoked key. The field should contain a single +# revocation in the form of a base64-encoded blob. +# An external tool is available for generating and revoking validator keys. +# +# +# +# [validators_file] +# +# Path or name of a file that determines the nodes to always accept as validators. +# +# The contents of the file should include a [validators] and/or +# [validator_list_sites] and [validator_list_keys] entries. +# [validators] should be followed by a list of validation public keys of +# nodes, one per line. +# [validator_list_sites] should be followed by a list of URIs each serving a +# list of recommended validators. +# [validator_list_keys] should be followed by a list of keys belonging to +# trusted validator list publishers. 
Validator lists fetched from configured +# sites will only be considered if the list is accompanied by a valid +# signature from a trusted publisher key. +# +# Specify the file by its name or path. +# Unless an absolute path is specified, it will be considered relative to +# the folder in which the rippled.cfg file is located. +# +# Examples: +# /home/ripple/validators.txt +# C:/home/ripple/validators.txt +# +# Example content: +# [validators] +# n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 +# n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj +# n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C +# n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS +# n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA +# +# +# +# [path_search] +# When searching for paths, the default search aggressiveness. This can take +# exponentially more resources as the size is increased. +# +# The default is: 7 +# +# [path_search_fast] +# [path_search_max] +# When searching for paths, the minimum and maximum search aggressiveness. +# +# If you do not need pathfinding, you can set path_search_max to zero to +# disable it and avoid some expensive bookkeeping. +# +# The default for 'path_search_fast' is 2. The default for 'path_search_max' is 10. +# +# [path_search_old] +# +# For clients that use the legacy path finding interfaces, the search +# aggressiveness to use. The default is 7. +# +# +# +# [fee_default] +# +# Sets the base cost of a transaction in drops. Used when the server has +# no other source of fee information, such as signing transactions offline. +# +# +# +# [workers] +# +# Configures the number of threads for processing work submitted by peers +# and clients. If not specified, then the value is automatically set to the +# number of processor threads plus 2 for networked nodes. Nodes running in +# stand alone mode default to 1 worker. +# +# +# +# [network_id] +# +# Specify the network which this server is configured to connect to and +# track. 
If set, the server will not establish connections with servers +# that are explicitly configured to track another network. +# +# Network identifiers are usually unsigned integers in the range 0 to +# 4294967295 inclusive. The server also maps the following well-known +# names to the corresponding numerical identifier: +# +# main -> 0 +# testnet -> 1 +# devnet -> 2 +# +# If this value is not specified the server is not explicitly configured +# to track a particular network. +# +# +# [ledger_replay] +# +# 0 or 1. +# +# 0: Disable the ledger replay feature [default] +# 1: Enable the ledger replay feature. With this feature enabled, when +# acquiring a ledger from the network, a rippled node only downloads +# the ledger header and the transactions instead of the whole ledger. +# And the ledger is built by applying the transactions to the parent +# ledger. +# +#------------------------------------------------------------------------------- +# +# 4. HTTPS Client +# +#---------------- +# +# The rippled server instance uses HTTPS GET requests in a variety of +# circumstances, including but not limited to contacting trusted domains to +# fetch information such as mapping an email address to a Ripple Payment +# Network address. +# +# [ssl_verify] +# +# 0 or 1. +# +# 0. HTTPS client connections will not verify certificates. +# 1. Certificates will be checked for HTTPS client connections. +# +# If not specified, this parameter defaults to 1. +# +# +# +# [ssl_verify_file] +# +# +# +# A file system path leading to the certificate verification file for +# HTTPS client requests. +# +# +# +# [ssl_verify_dir] +# +# +# +# +# A file system path leading to a file or directory containing the root +# certificates that the server will accept for verifying HTTP servers. +# Used only for outbound HTTPS client connections. +# +#------------------------------------------------------------------------------- +# +# 5. 
Reporting Mode +# +#------------ +# +# rippled has an optional operating mode called Reporting Mode. In Reporting +# Mode, rippled does not connect to the peer to peer network. Instead, rippled +# will continuously extract data from one or more rippled servers that are +# connected to the peer to peer network (referred to as an ETL source). +# Reporting mode servers will forward RPC requests that require access to the +# peer to peer network (submit, fee, etc) to an ETL source. +# +# [reporting] Settings for Reporting Mode. If and only if this section is +# present, rippled will start in reporting mode. This section +# contains a list of ETL source names, and key-value pairs. The +# ETL source names each correspond to a configuration file +# section; the names must match exactly. The key-value pairs are +# optional. +# +# +# [] +# +# A series of key/value pairs that specify an ETL source. +# +# source_ip = +# +# Required. IP address of the ETL source. Can also be a DNS record. +# +# source_ws_port = +# +# Required. Port on which ETL source is accepting unencrypted websocket +# connections. +# +# source_grpc_port = +# +# Required for ETL. Port on which ETL source is accepting gRPC requests. +# If this option is omitted, this ETL source cannot actually be used for +# ETL; the Reporting Mode server can still forward RPCs to this ETL +# source, but cannot extract data from this ETL source. +# +# +# Key-value pairs (all optional): +# +# read_only Valid values: 0, 1. Default is 0. If set to 1, the server +# will start in strict read-only mode, and will not perform +# ETL. The server will still handle RPC requests, and will +# still forward RPC requests that require access to the p2p +# network. +# +# start_sequence +# Sequence of first ledger to extract if the database is empty. +# ETL extracts ledgers in order. If this setting is absent and +# the database is empty, ETL will start with the next ledger +# validated by the network. 
If this setting is present and the +# database is not empty, an exception is thrown. +# +# num_markers Degree of parallelism used during the initial ledger +# download. Only used if the database is empty. Valid values +# are 1-256. A higher degree of parallelism results in a +# faster download, but puts more load on the ETL source. +# Default is 2. +# +# Example: +# +# [reporting] +# etl_source1 +# etl_source2 +# read_only=0 +# start_sequence=32570 +# num_markers=8 +# +# [etl_source1] +# source_ip=1.2.3.4 +# source_ws_port=6005 +# source_grpc_port=50051 +# +# [etl_source2] +# source_ip=5.6.7.8 +# source_ws_port=6005 +# source_grpc_port=50051 +# +# Minimal Example: +# +# [reporting] +# etl_source1 +# +# [etl_source1] +# source_ip=1.2.3.4 +# source_ws_port=6005 +# source_grpc_port=50051 +# +# +# Notes: +# +# Reporting Mode requires Postgres (instead of SQLite). The Postgres +# connection info is specified under the [ledger_tx_tables] config section; +# see the Database section for further documentation. +# +# Each ETL source specified must have gRPC enabled (by adding a [port_grpc] +# section to the config). It is recommended to add a secure_gateway entry to +# the gRPC section, in order to bypass the server's rate limiting. +# This section needs to be added to the config of the ETL source, not +# the config of the reporting node. In the example below, the +# reporting server is running at 127.0.0.1. Multiple IPs can be +# specified in secure_gateway via a comma separated list. +# +# [port_grpc] +# ip = 0.0.0.0 +# port = 50051 +# secure_gateway = 127.0.0.1 +# +# +#------------------------------------------------------------------------------- +# +# 6. Database +# +#------------ +# +# rippled creates 4 SQLite database to hold bookkeeping information +# about transactions, local credentials, and various other things. +# It also creates the NodeDB, which holds all the objects that +# make up the current and historical ledgers. 
In Reporting Mode, rippled +# uses a Postgres database instead of SQLite. +# +# The simplest way to work with Postgres is to install it locally. +# When it is running, execute the initdb.sh script in the current +# directory as: sudo -u postgres ./initdb.sh +# This will create the rippled user and an empty database of the same name. +# +# The size of the NodeDB grows in proportion to the amount of new data and the +# amount of historical data (a configurable setting) so the performance of the +# underlying storage media where the NodeDB is placed can significantly affect +# the performance of the server. +# +# Partial pathnames will be considered relative to the location of +# the rippled.cfg file. +# +# [node_db] Settings for the Node Database (required) +# +# Format (without spaces): +# One or more lines of case-insensitive key / value pairs: +# '=' +# ... +# +# Example: +# type=nudb +# path=db/nudb +# +# The "type" field must be present and controls the choice of backend: +# +# type = NuDB +# +# NuDB is a high-performance database written by Ripple Labs and optimized +# for rippled and solid-state drives. +# +# NuDB maintains its high speed regardless of the amount of history +# stored. Online delete may be selected, but is not required. NuDB is +# available on all platforms that rippled runs on. +# +# type = RocksDB +# +# RocksDB is an open-source, general-purpose key/value store - see +# http://rocksdb.org/ for more details. +# +# RocksDB is an alternative backend for systems that don't use solid-state +# drives. Because RocksDB's performance degrades as it stores more data, +# keeping full history is not advised, and using online delete is +# recommended. +# +# type = Cassandra +# +# Apache Cassandra is an open-source, distributed key-value store - see +# https://cassandra.apache.org/ for more details. +# +# Cassandra is an alternative backend to be used only with Reporting Mode. +# See the Reporting Mode section for more details about Reporting Mode. 
+# +# Required keys for NuDB and RocksDB: +# +# path Location to store the database +# +# Required keys for Cassandra: +# +# contact_points IP of a node in the Cassandra cluster +# +# port CQL Native Transport Port +# +# secure_connect_bundle +# Absolute path to a secure connect bundle. When using +# a secure connect bundle, contact_points and port are +# not required. +# +# keyspace Name of Cassandra keyspace to use +# +# table_name Name of table in above keyspace to use +# +# Optional keys +# +# cache_size Size of cache for database records. Default is 16384. +# Setting this value to 0 will use the default value. +# +# cache_age Length of time in minutes to keep database records +# cached. Default is 5 minutes. Setting this value to +# 0 will use the default value. +# +# Note: if neither cache_size nor cache_age is +# specified, the cache for database records will not +# be created. If only one of cache_size or cache_age +# is specified, the cache will be created using the +# default value for the unspecified parameter. +# +# Note: the cache will not be created if online_delete +# is specified, or if shards are used. +# +# Optional keys for NuDB or RocksDB: +# +# earliest_seq The default is 32570 to match the XRP ledger +# network's earliest allowed sequence. Alternate +# networks may set this value. Minimum value of 1. +# If a [shard_db] section is defined, and this +# value is present either [node_db] or [shard_db], +# it must be defined with the same value in both +# sections. +# +# online_delete Minimum value of 256. Enable automatic purging +# of older ledger information. Maintain at least this +# number of ledger records online. Must be greater +# than or equal to ledger_history. +# +# These keys modify the behavior of online_delete, and thus are only +# relevant if online_delete is defined and non-zero: +# +# advisory_delete 0 for disabled, 1 for enabled. 
If set, the +# administrative RPC call "can_delete" is required +# to enable online deletion of ledger records. +# Online deletion does not run automatically if +# non-zero and the last deletion was on a ledger +# greater than the current "can_delete" setting. +# Default is 0. +# +# delete_batch When automatically purging, SQLite database +# records are deleted in batches. This value +# controls the maximum size of each batch. Larger +# batches keep the databases locked for more time, +# which may cause other functions to fall behind, +# and thus cause the node to lose sync. +# Default is 100. +# +# back_off_milliseconds +# Number of milliseconds to wait between +# online_delete batches to allow other functions +# to catch up. +# Default is 100. +# +# age_threshold_seconds +# The online delete process will only run if the +# latest validated ledger is younger than this +# number of seconds. +# Default is 60. +# +# recovery_wait_seconds +# The online delete process checks periodically +# that rippled is still in sync with the network, +# and that the validated ledger is less than +# 'age_threshold_seconds' old. By default, if it +# is not the online delete process aborts and +# tries again later. If 'recovery_wait_seconds' +# is set and rippled is out of sync, but likely to +# recover quickly, then online delete will wait +# this number of seconds for rippled to get back +# into sync before it aborts. +# Set this value if the node is otherwise staying +# in sync, or recovering quickly, but the online +# delete process is unable to finish. +# Default is unset. +# +# Optional keys for Cassandra: +# +# username Username to use if Cassandra cluster requires +# authentication +# +# password Password to use if Cassandra cluster requires +# authentication +# +# max_requests_outstanding +# Limits the maximum number of concurrent database +# writes. Default is 10 million. For slower clusters, +# large numbers of concurrent writes can overload the +# cluster. 
Setting this option can help eliminate +# write timeouts and other write errors due to the +# cluster being overloaded. +# +# Notes: +# The 'node_db' entry configures the primary, persistent storage. +# +# The 'import_db' is used with the '--import' command line option to +# migrate the specified database into the current database given +# in the [node_db] section. +# +# [import_db] Settings for performing a one-time import (optional) +# [database_path] Path to the book-keeping databases. +# +# The server creates and maintains 4 to 5 bookkeeping SQLite databases in +# the 'database_path' location. If you omit this configuration setting, +# the server creates a directory called "db" located in the same place as +# your rippled.cfg file. +# Partial pathnames are relative to the location of the rippled executable. +# +# [shard_db] Settings for the Shard Database (optional) +# +# Format (without spaces): +# One or more lines of case-insensitive key / value pairs: +# '=' +# ... +# +# Example: +# path=db/shards/nudb +# +# Required keys: +# path Location to store the database +# +# Optional keys: +# max_historical_shards +# The maximum number of historical shards +# to store. +# +# [historical_shard_paths] Additional storage paths for the Shard Database (optional) +# +# Format (without spaces): +# One or more lines, each expressing a full path for storing historical shards: +# /mnt/disk1 +# /mnt/disk2 +# ... +# +# [sqlite] Tuning settings for the SQLite databases (optional) +# +# Format (without spaces): +# One or more lines of case-insensitive key / value pairs: +# '=' +# ... +# +# Example 1: +# safety_level=low +# +# Example 2: +# journal_mode=off +# synchronous=off +# +# WARNING: These settings can have significant effects on data integrity, +# particularly in systemic failure scenarios. 
It is strongly recommended +# that they be left at their defaults unless the server is having +# performance issues during normal operation or during automatic purging +# (online_delete) operations. A warning will be logged on startup if +# 'ledger_history' is configured to store more than 10,000,000 ledgers and +# any of these settings are less safe than the default. This is due to the +# inordinate amount of time and bandwidth it will take to safely rebuild a +# corrupted database of that size from other peers. +# +# Optional keys: +# +# safety_level Valid values: high, low +# The default is "high", which tunes the SQLite +# databases in the most reliable mode, and is +# equivalent to: +# journal_mode=wal +# synchronous=normal +# temp_store=file +# "low" is equivalent to: +# journal_mode=memory +# synchronous=off +# temp_store=memory +# These "low" settings trade speed and reduced I/O +# for a higher risk of data loss. See the +# individual settings below for more information. +# This setting may not be combined with any of the +# other tuning settings: "journal_mode", +# "synchronous", or "temp_store". +# +# journal_mode Valid values: delete, truncate, persist, memory, wal, off +# The default is "wal", which uses a write-ahead +# log to implement database transactions. +# Alternately, "memory" saves disk I/O, but if +# rippled crashes during a transaction, the +# database is likely to be corrupted. +# See https://www.sqlite.org/pragma.html#pragma_journal_mode +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. +# +# synchronous Valid values: off, normal, full, extra +# The default is "normal", which works well with +# the "wal" journal mode. Alternatively, "off" +# allows rippled to continue as soon as data is +# passed to the OS, which can significantly +# increase speed, but risks data corruption if +# the host computer crashes before writing that +# data to disk. 
+# See https://www.sqlite.org/pragma.html#pragma_synchronous +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. +# +# temp_store Valid values: default, file, memory +# The default is "file", which will use files +# for temporary database tables and indices. +# Alternatively, "memory" may save I/O, but +# rippled does not currently use many, if any, +# of these temporary objects. +# See https://www.sqlite.org/pragma.html#pragma_temp_store +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. +# +# [ledger_tx_tables] (optional) +# +# conninfo Info for connecting to Postgres. Format is +# postgres://[username]:[password]@[ip]/[database]. +# The database and user must already exist. If this +# section is missing and rippled is running in +# Reporting Mode, rippled will connect as the +# user running rippled to a database with the +# same name. On Linux and Mac OS X, the connection +# will take place using the server's UNIX domain +# socket. On Windows, through the localhost IP +# address. Default is empty. +# +# use_tx_tables Valid values: 1, 0 +# The default is 1 (true). Determines whether to use +# the SQLite transaction database. If set to 0, +# rippled will not write to the transaction database, +# and will reject tx, account_tx and tx_history RPCs. +# In Reporting Mode, this setting is ignored. +# +# max_connections Valid values: any positive integer up to 64 bit +# storage length. This configures the maximum +# number of concurrent connections to postgres. +# Default is the maximum possible value to +# fit in a 64 bit integer. +# +# timeout Number of seconds after which idle postgres +# connections are disconnected. If set to 0, +# connections never timeout. Default is 600. +# +# +# remember_ip Valid values: 1, 0 +# Default is 1 (true). Whether to cache host and +# port connection settings. 
+# +# +#------------------------------------------------------------------------------- +# +# 7. Diagnostics +# +#--------------- +# +# These settings are designed to help server administrators diagnose +# problems, and obtain detailed information about the activities being +# performed by the rippled process. +# +# +# +# [debug_logfile] +# +# Specifies where a debug logfile is kept. By default, no debug log is kept. +# Unless absolute, the path is relative to the directory containing this file. +# +# Example: debug.log +# +# +# +# [insight] +# +# Configuration parameters for the Beast. Insight stats collection module. +# +# Insight is a module that collects information from the areas of rippled +# that have instrumentation. The configuration parameters control where the +# collection metrics are sent. The parameters are expressed as key = value +# pairs with no white space. The main parameter is the choice of server: +# +# "server" +# +# Choice of server to send metrics to. Currently the only choice is +# "statsd" which sends UDP packets to a StatsD daemon, which must be +# running while rippled is running. More information on StatsD is +# available here: +# https://github.com/b/statsd_spec +# +# When server=statsd, these additional keys are used: +# +# "address" The UDP address and port of the listening StatsD server, +# in the format, n.n.n.n:port. +# +# "prefix" A string prepended to each collected metric. This is used +# to distinguish between different running instances of rippled. +# +# If this section is missing, or the server type is unspecified or unknown, +# statistics are not collected or reported. +# +# Example: +# +# [insight] +# server=statsd +# address=192.168.0.95:4201 +# prefix=my_validator +# +# [perf] +# +# Configuration of performance logging. If enabled, write Json-formatted +# performance-oriented data periodically to a distinct log file. +# +# "perf_log" A string specifying the pathname of the performance log +# file. 
A relative pathname will log relative to the +# configuration directory. Required to enable +# performance logging. +# +# "log_interval" Integer value for number of seconds between writing +# to performance log. Default 1. +# +# Example: +# [perf] +# perf_log=/var/log/rippled/perf.log +# log_interval=2 +# +#------------------------------------------------------------------------------- +# +# 8. Voting +# +#---------- +# +# The vote settings configure settings for the entire Ripple network. +# While a single instance of rippled cannot unilaterally enforce network-wide +# settings, these choices become part of the instance's vote during the +# consensus process for each voting ledger. +# +# [voting] +# +# A set of key/value pair parameters used during voting ledgers. +# +# reference_fee = +# +# The cost of the reference transaction fee, specified in drops. +# The reference transaction is the simplest form of transaction. +# It represents an XRP payment between two parties. +# +# If this parameter is unspecified, rippled will use an internal +# default. Don't change this without understanding the consequences. +# +# Example: +# reference_fee = 10 # 10 drops +# +# account_reserve = +# +# The account reserve requirement is specified in drops. The portion of an +# account's XRP balance that is at or below the reserve may only be +# spent on transaction fees, and not transferred out of the account. +# +# If this parameter is unspecified, rippled will use an internal +# default. Don't change this without understanding the consequences. +# +# Example: +# account_reserve = 20000000 # 20 XRP +# +# owner_reserve = +# +# The owner reserve is the amount of XRP reserved in the account for +# each ledger item owned by the account. Ledger items an account may +# own include trust lines, open orders, and tickets. +# +# If this parameter is unspecified, rippled will use an internal +# default. Don't change this without understanding the consequences. 
+# +# Example: +# owner_reserve = 5000000 # 5 XRP +# +#------------------------------------------------------------------------------- +# +# 9. Misc Settings +# +#----------------- +# +# [node_size] +# +# Tunes the servers based on the expected load and available memory. Legal +# sizes are "tiny", "small", "medium", "large", and "huge". We recommend +# you start at the default and raise the setting if you have extra memory. +# +# The code attempts to automatically determine the appropriate size for +# this parameter based on the amount of RAM and the number of execution +# cores available to the server. The current decision matrix is: +# +# | | Cores | +# |---------|------------------------| +# | RAM | 1 | 2 or 3 | ≥ 4 | +# |---------|------|--------|--------| +# | < ~8GB | tiny | tiny | tiny | +# | < ~12GB | tiny | small | small | +# | < ~16GB | tiny | small | medium | +# | < ~24GB | tiny | small | large | +# | < ~32GB | tiny | small | huge | +# +# [signing_support] +# +# Specifies whether the server will accept "sign" and "sign_for" commands +# from remote users. Even if the commands are sent over a secure protocol +# like secure websocket, this should generally be discouraged, because it +# requires sending the secret to use for signing to the server. In order +# to sign transactions, users should prefer to use a standalone signing +# tool instead. +# +# This flag has no effect on the "sign" and "sign_for" command line options +# that rippled makes available. +# +# The default value of this field is "false" +# +# Example: +# +# [signing_support] +# true +# +# [crawl] +# +# List of options to control what data is reported through the /crawl endpoint +# See https://xrpl.org/peer-crawler.html +# +# +# +# Enable or disable access to /crawl requests. Default is '1' which +# enables access. +# +# overlay = +# +# Report information about peers this server is connected to, similar +# to the "peers" RPC API. Default is '1' which means to report peer +# overlay info. 
+# +# server = +# +# Report information about the local server, similar to the "server_state" +# RPC API. Default is '1' which means to report local server info. +# +# counts = +# +# Report information about the local server health counters, similar to +# the "get_counts" RPC API. Default is '0' which means not to report +# server counts. +# +# unl = +# +# Report information about the local server's validator lists, similar to +# the "validators" and "validator_list_sites" RPC APIs. Default is '1' +# which means to report server validator lists. +# +# Examples: +# +# [crawl] +# 0 +# +# [crawl] +# overlay = 1 +# server = 1 +# counts = 0 +# unl = 1 +# +# [vl] +# +# Options to control what data is reported through the /vl endpoint +# See [...] +# +# enable = +# +# Enable or disable access to /vl requests. Default is '1' which +# enables access. +# +# [beta_rpc_api] +# +# 0 or 1. +# +# 0: Disable the beta API version for JSON-RPC and WebSocket [default] +# 1: Enable the beta API version for testing. The beta API version +# contains breaking changes that require a new API version number. +# They are not ready for public consumption. +# +#------------------------------------------------------------------------------- +# +# 10. Example Settings +# +#-------------------- +# +# Administrators can use these values as a starting point for configuring +# their instance of rippled, but each value should be checked to make sure +# it meets the business requirements for the organization. +# +# Server +# +# These example configuration settings create these ports: +# +# "peer" +# +# Peer protocol open to everyone. This is required to accept +# incoming rippled connections. This does not affect automatic +# or manual outgoing Peer protocol connections. +# +# "rpc" +# +# Administrative RPC commands over HTTPS, when originating from +# the same machine (via the loopback adapter at 127.0.0.1). 
+# +# "wss_admin" +# +# Admin level API commands over Secure Websockets, when originating +# from the same machine (via the loopback adapter at 127.0.0.1). +# +# This port is commented out but can be enabled by removing +# the '#' from each corresponding line including the entry under [server] +# +# "wss_public" +# +# Guest level API commands over Secure Websockets, open to everyone. +# +# For HTTPS and Secure Websockets ports, if no certificate and key file +# are specified then a self-signed certificate will be generated on startup. +# If you have a certificate and key file, uncomment the corresponding lines +# and ensure the paths to the files are correct. +# +# NOTE +# +# To accept connections on well known ports such as 80 (HTTP) or +# 443 (HTTPS), most operating systems will require rippled to +# run with administrator privileges, or else rippled will not start. + +[server] +port_rpc_admin_local +port_peer +port_ws_admin_local +port_ws_public +#port_grpc +#ssl_key = /etc/ssl/private/server.key +#ssl_cert = /etc/ssl/certs/server.crt + +[port_rpc_admin_local] +port = 5006 +ip = 127.0.0.1 +admin = 127.0.0.1 +protocol = http + +[port_peer] +port = 51235 +ip = 0.0.0.0 +# alternatively, to accept connections on IPv4 + IPv6, use: +#ip = :: +protocol = peer + +[port_ws_admin_local] +port = 6007 +ip = 127.0.0.1 +admin = 127.0.0.1 +protocol = ws + +#[port_grpc#] +#port = 50051 +#ip = 0.0.0.0 +#secure_gateway = 127.0.0.1 + +[port_ws_public] +port = 6008 +ip = 127.0.0.1 +protocol = ws + +#------------------------------------------------------------------------------- + +# This is primary persistent datastore for rippled. This includes transaction +# metadata, account states, and ledger headers. Helpful information can be +# found at https://xrpl.org/capacity-planning.html#node-db-type +# type=NuDB is recommended for non-validators with fast SSDs. Validators or +# slow / spinning disks should use RocksDB. Caution: Spinning disks are +# not recommended. 
They do not perform well enough to consistently remain +# synced to the network. +# online_delete=512 is recommended to delete old ledgers while maintaining at +# least 512. +# advisory_delete=0 allows the online delete process to run automatically +# when the node has approximately two times the "online_delete" value of +# ledgers. No external administrative command is required to initiate +# deletion. +[node_db] +type=NuDB +path=/var/lib/rippled-reporting/db/nudb +# online_delete=512 # +advisory_delete=0 + +# This is the persistent datastore for shards. It is important for the health +# of the ripple network that rippled operators shard as much as practical. +# NuDB requires SSD storage. Helpful information can be found at +# https://xrpl.org/history-sharding.html +#[shard_db] +#path=/var/lib/rippled/db/shards/nudb +#max_historical_shards=50 +# +# This optional section can be configured with a list +# of paths to use for storing historical shards. Each +# path must correspond to a unique filesystem. +#[historical_shard_paths] +#/path/1 +#/path/2 + +[database_path] +/var/lib/rippled-reporting/db + +# To use Postgres, uncomment this section and fill in the appropriate connection +# info. Postgres can only be used in Reporting Mode. +# To disable writing to the transaction database, uncomment this section, and +# set use_tx_tables=0 +# [ledger_tx_tables] +# conninfo = postgres://:@localhost/ +# use_tx_tables=1 + + +# This needs to be an absolute directory reference, not a relative one. +# Modify this value as required. +[debug_logfile] +/var/log/rippled-reporting/debug.log + +[sntp_servers] +time.windows.com +time.apple.com +time.nist.gov +pool.ntp.org + +# To use the XRP test network +# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html), +# use the following [ips] section: +# [ips] +# r.altnet.rippletest.net 51235 + +# File containing trusted validator keys or validator list publishers. 
+# Unless an absolute path is specified, it will be considered relative to the +# folder in which the rippled.cfg file is located. +[validators_file] +/opt/rippled-reporting/etc/validators.txt + +# Turn down default logging to save disk space in the long run. +# Valid values here are trace, debug, info, warning, error, and fatal +[rpc_startup] +{ "command": "log_level", "severity": "info" } + +# If ssl_verify is 1, certificates will be validated. +# To allow the use of self-signed certificates for development or internal use, +# set to ssl_verify to 0. +[ssl_verify] +1 + + +# To run in Reporting Mode, uncomment this section and fill in the appropriate +# connection info for one or more ETL sources. +[reporting] +etl_source + +[etl_source] +source_grpc_port=50051 +source_ws_port=6005 +source_ip=127.0.0.1 diff --git a/src/ripple/app/ledger/AcceptedLedger.cpp b/src/ripple/app/ledger/AcceptedLedger.cpp index 41946f00984..526704d1889 100644 --- a/src/ripple/app/ledger/AcceptedLedger.cpp +++ b/src/ripple/app/ledger/AcceptedLedger.cpp @@ -19,8 +19,7 @@ #include #include -#include -#include +#include namespace ripple { @@ -29,29 +28,34 @@ AcceptedLedger::AcceptedLedger( Application& app) : mLedger(ledger) { + transactions_.reserve(256); + auto insertAll = [&](auto const& txns) { + auto const& idcache = app.accountIDCache(); + for (auto const& item : txns) - { - insert(std::make_shared( - ledger, - item.first, - item.second, - app.accountIDCache(), - app.logs())); - } + transactions_.emplace_back(std::make_unique( + ledger, item.first, item.second, idcache)); }; if (app.config().reporting()) - insertAll(flatFetchTransactions(*ledger, app)); + { + auto const txs = flatFetchTransactions(*ledger, app); + transactions_.reserve(txs.size()); + insertAll(txs); + } else + { + transactions_.reserve(256); insertAll(ledger->txs); -} - -void -AcceptedLedger::insert(AcceptedLedgerTx::ref at) -{ - assert(mMap.find(at->getIndex()) == mMap.end()); - 
mMap.insert(std::make_pair(at->getIndex(), at)); + } + + std::sort( + transactions_.begin(), + transactions_.end(), + [](auto const& a, auto const& b) { + return a->getTxnSeq() < b->getTxnSeq(); + }); } } // namespace ripple diff --git a/src/ripple/app/ledger/AcceptedLedger.h b/src/ripple/app/ledger/AcceptedLedger.h index 0575013695f..0187fdfb679 100644 --- a/src/ripple/app/ledger/AcceptedLedger.h +++ b/src/ripple/app/ledger/AcceptedLedger.h @@ -41,43 +41,40 @@ namespace ripple { the result of the a consensus process (though haven't validated it yet). */ -class AcceptedLedger +class AcceptedLedger : public CountedObject { public: - using pointer = std::shared_ptr; - using ret = const pointer&; - using map_t = std::map; - // mapt_t must be an ordered map! - using value_type = map_t::value_type; - using const_iterator = map_t::const_iterator; + AcceptedLedger( + std::shared_ptr const& ledger, + Application& app); -public: std::shared_ptr const& getLedger() const { return mLedger; } - const map_t& - getMap() const + + std::size_t + size() const { - return mMap; + return transactions_.size(); } - int - getTxnCount() const + auto + begin() const { - return mMap.size(); + return transactions_.begin(); } - AcceptedLedger( - std::shared_ptr const& ledger, - Application& app); + auto + end() const + { + return transactions_.end(); + } private: - void insert(AcceptedLedgerTx::ref); - std::shared_ptr mLedger; - map_t mMap; + std::vector> transactions_; }; } // namespace ripple diff --git a/src/ripple/app/ledger/AcceptedLedgerTx.cpp b/src/ripple/app/ledger/AcceptedLedgerTx.cpp index c92ebffe04b..f0408b0c049 100644 --- a/src/ripple/app/ledger/AcceptedLedgerTx.cpp +++ b/src/ripple/app/ledger/AcceptedLedgerTx.cpp @@ -18,7 +18,6 @@ //============================================================================== #include -#include #include #include #include @@ -30,72 +29,30 @@ AcceptedLedgerTx::AcceptedLedgerTx( std::shared_ptr const& ledger, std::shared_ptr const& txn, 
std::shared_ptr const& met, - AccountIDCache const& accountCache, - Logs& logs) - : mLedger(ledger) - , mTxn(txn) - , mMeta(std::make_shared( - txn->getTransactionID(), - ledger->seq(), - *met)) - , mAffected(mMeta->getAffectedAccounts(logs.journal("View"))) - , accountCache_(accountCache) - , logs_(logs) + AccountIDCache const& accountCache) + : mTxn(txn) + , mMeta(txn->getTransactionID(), ledger->seq(), *met) + , mAffected(mMeta.getAffectedAccounts()) { assert(!ledger->open()); - mResult = mMeta->getResultTER(); - Serializer s; met->add(s); mRawMeta = std::move(s.modData()); - buildJson(); -} - -AcceptedLedgerTx::AcceptedLedgerTx( - std::shared_ptr const& ledger, - std::shared_ptr const& txn, - TER result, - AccountIDCache const& accountCache, - Logs& logs) - : mLedger(ledger) - , mTxn(txn) - , mResult(result) - , mAffected(txn->getMentionedAccounts()) - , accountCache_(accountCache) - , logs_(logs) -{ - assert(ledger->open()); - buildJson(); -} - -std::string -AcceptedLedgerTx::getEscMeta() const -{ - assert(!mRawMeta.empty()); - return sqlBlobLiteral(mRawMeta); -} - -void -AcceptedLedgerTx::buildJson() -{ mJson = Json::objectValue; mJson[jss::transaction] = mTxn->getJson(JsonOptions::none); - if (mMeta) - { - mJson[jss::meta] = mMeta->getJson(JsonOptions::none); - mJson[jss::raw_meta] = strHex(mRawMeta); - } + mJson[jss::meta] = mMeta.getJson(JsonOptions::none); + mJson[jss::raw_meta] = strHex(mRawMeta); - mJson[jss::result] = transHuman(mResult); + mJson[jss::result] = transHuman(mMeta.getResultTER()); if (!mAffected.empty()) { Json::Value& affected = (mJson[jss::affected] = Json::arrayValue); for (auto const& account : mAffected) - affected.append(accountCache_.toBase58(account)); + affected.append(accountCache.toBase58(account)); } if (mTxn->getTxnType() == ttOFFER_CREATE) @@ -107,14 +64,21 @@ AcceptedLedgerTx::buildJson() if (account != amount.issue().account) { auto const ownerFunds = accountFunds( - *mLedger, + *ledger, account, amount, fhIGNORE_FREEZE, - 
logs_.journal("View")); + beast::Journal{beast::Journal::getNullSink()}); mJson[jss::transaction][jss::owner_funds] = ownerFunds.getText(); } } } +std::string +AcceptedLedgerTx::getEscMeta() const +{ + assert(!mRawMeta.empty()); + return sqlBlobLiteral(mRawMeta); +} + } // namespace ripple diff --git a/src/ripple/app/ledger/AcceptedLedgerTx.h b/src/ripple/app/ledger/AcceptedLedgerTx.h index 712e085b6cc..7d68978571b 100644 --- a/src/ripple/app/ledger/AcceptedLedgerTx.h +++ b/src/ripple/app/ledger/AcceptedLedgerTx.h @@ -39,40 +39,22 @@ class Logs; - Which accounts are affected * This is used by InfoSub to report to clients - Cached stuff - - @code - @endcode - - @see {uri} - - @ingroup ripple_ledger */ -class AcceptedLedgerTx +class AcceptedLedgerTx : public CountedObject { -public: - using pointer = std::shared_ptr; - using ref = const pointer&; - public: AcceptedLedgerTx( std::shared_ptr const& ledger, std::shared_ptr const&, std::shared_ptr const&, - AccountIDCache const&, - Logs&); - AcceptedLedgerTx( - std::shared_ptr const&, - std::shared_ptr const&, - TER, - AccountIDCache const&, - Logs&); + AccountIDCache const&); std::shared_ptr const& getTxn() const { return mTxn; } - std::shared_ptr const& + TxMeta const& getMeta() const { return mMeta; @@ -97,45 +79,28 @@ class AcceptedLedgerTx TER getResult() const { - return mResult; + return mMeta.getResultTER(); } std::uint32_t getTxnSeq() const { - return mMeta->getIndex(); - } - - bool - isApplied() const - { - return bool(mMeta); - } - int - getIndex() const - { - return mMeta ? 
mMeta->getIndex() : 0; + return mMeta.getIndex(); } std::string getEscMeta() const; - Json::Value + + Json::Value const& getJson() const { return mJson; } private: - std::shared_ptr mLedger; std::shared_ptr mTxn; - std::shared_ptr mMeta; - TER mResult; + TxMeta mMeta; boost::container::flat_set mAffected; Blob mRawMeta; Json::Value mJson; - AccountIDCache const& accountCache_; - Logs& logs_; - - void - buildJson(); }; } // namespace ripple diff --git a/src/ripple/app/ledger/InboundLedger.h b/src/ripple/app/ledger/InboundLedger.h index 25f64447649..287dbaf7f16 100644 --- a/src/ripple/app/ledger/InboundLedger.h +++ b/src/ripple/app/ledger/InboundLedger.h @@ -39,9 +39,6 @@ class InboundLedger final : public TimeoutCounter, public: using clock_type = beast::abstract_clock; - using PeerDataPairType = - std::pair, std::shared_ptr>; - // These are the reasons we might acquire a ledger enum class Reason { HISTORY, // Acquiring past ledger @@ -193,7 +190,9 @@ class InboundLedger final : public TimeoutCounter, // Data we have received from peers std::mutex mReceivedDataLock; - std::vector mReceivedData; + std::vector< + std::pair, std::shared_ptr>> + mReceivedData; bool mReceiveDispatched; std::unique_ptr mPeerSet; }; diff --git a/src/ripple/app/ledger/LedgerHistory.cpp b/src/ripple/app/ledger/LedgerHistory.cpp index f407b2064ea..53c723e1469 100644 --- a/src/ripple/app/ledger/LedgerHistory.cpp +++ b/src/ripple/app/ledger/LedgerHistory.cpp @@ -26,14 +26,6 @@ namespace ripple { -// VFALCO TODO replace macros - -#ifndef CACHED_LEDGER_NUM -#define CACHED_LEDGER_NUM 96 -#endif - -std::chrono::seconds constexpr CachedLedgerAge = std::chrono::minutes{2}; - // FIXME: Need to clean up ledgers by index at some point LedgerHistory::LedgerHistory( @@ -44,8 +36,8 @@ LedgerHistory::LedgerHistory( , mismatch_counter_(collector->make_counter("ledger.history", "mismatch")) , m_ledgers_by_hash( "LedgerCache", - CACHED_LEDGER_NUM, - CachedLedgerAge, + 
app_.config().getValueFor(SizedItem::ledgerSize), + std::chrono::seconds{app_.config().getValueFor(SizedItem::ledgerAge)}, stopwatch(), app_.journal("TaggedCache")) , m_consensus_validated( @@ -523,13 +515,6 @@ LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) return true; } -void -LedgerHistory::tune(int size, std::chrono::seconds age) -{ - m_ledgers_by_hash.setTargetSize(size); - m_ledgers_by_hash.setTargetAge(age); -} - void LedgerHistory::clearLedgerCachePrior(LedgerIndex seq) { diff --git a/src/ripple/app/ledger/LedgerHistory.h b/src/ripple/app/ledger/LedgerHistory.h index f8d4318dbea..be5c559beed 100644 --- a/src/ripple/app/ledger/LedgerHistory.h +++ b/src/ripple/app/ledger/LedgerHistory.h @@ -70,13 +70,6 @@ class LedgerHistory LedgerHash getLedgerHash(LedgerIndex ledgerIndex); - /** Set the history cache's parameters - @param size The target size of the cache - @param age The target age of the cache, in seconds - */ - void - tune(int size, std::chrono::seconds age); - /** Remove stale cache entries */ void diff --git a/src/ripple/app/ledger/LedgerHolder.h b/src/ripple/app/ledger/LedgerHolder.h index 449cff9ab0c..93d67400e05 100644 --- a/src/ripple/app/ledger/LedgerHolder.h +++ b/src/ripple/app/ledger/LedgerHolder.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_APP_LEDGER_LEDGERHOLDER_H_INCLUDED #define RIPPLE_APP_LEDGER_LEDGERHOLDER_H_INCLUDED +#include #include #include @@ -35,7 +36,7 @@ namespace ripple { way the object always holds a value. We can use the genesis ledger in all cases. 
*/ -class LedgerHolder +class LedgerHolder : public CountedObject { public: // Update the held ledger diff --git a/src/ripple/app/ledger/LedgerMaster.h b/src/ripple/app/ledger/LedgerMaster.h index dbb01f54a4c..802df8eb5cb 100644 --- a/src/ripple/app/ledger/LedgerMaster.h +++ b/src/ripple/app/ledger/LedgerMaster.h @@ -219,8 +219,6 @@ class LedgerMaster : public AbstractFetchPackContainer bool getFullValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal); - void - tune(int size, std::chrono::seconds age); void sweep(); float diff --git a/src/ripple/app/ledger/LedgerReplay.h b/src/ripple/app/ledger/LedgerReplay.h index 96af63c1354..0365dea1b7e 100644 --- a/src/ripple/app/ledger/LedgerReplay.h +++ b/src/ripple/app/ledger/LedgerReplay.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_APP_LEDGER_LEDGERREPLAY_H_INCLUDED #define RIPPLE_APP_LEDGER_LEDGERREPLAY_H_INCLUDED +#include #include #include #include @@ -29,7 +30,7 @@ namespace ripple { class Ledger; class STTx; -class LedgerReplay +class LedgerReplay : public CountedObject { std::shared_ptr parent_; std::shared_ptr replay_; diff --git a/src/ripple/app/ledger/OrderBookDB.cpp b/src/ripple/app/ledger/OrderBookDB.cpp index b9f72b71523..343e7f6269a 100644 --- a/src/ripple/app/ledger/OrderBookDB.cpp +++ b/src/ripple/app/ledger/OrderBookDB.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -28,70 +29,72 @@ namespace ripple { OrderBookDB::OrderBookDB(Application& app) - : app_(app), mSeq(0), j_(app.journal("OrderBookDB")) + : app_(app), seq_(0), j_(app.journal("OrderBookDB")) { } -void -OrderBookDB::invalidate() -{ - std::lock_guard sl(mLock); - mSeq = 0; -} - void OrderBookDB::setup(std::shared_ptr const& ledger) { + if (!app_.config().standalone() && app_.getOPs().isNeedNetworkLedger()) { - std::lock_guard sl(mLock); - auto seq = ledger->info().seq; + JLOG(j_.warn()) << "Eliding full order book update: no ledger"; + return; + } - // Do a full update every 256 ledgers - if (mSeq != 0) - { - if 
(seq == mSeq) - return; - if ((seq > mSeq) && ((seq - mSeq) < 256)) - return; - if ((seq < mSeq) && ((mSeq - seq) < 16)) - return; - } + auto seq = seq_.load(); - JLOG(j_.debug()) << "Advancing from " << mSeq << " to " << seq; + if (seq != 0) + { + if ((seq > ledger->seq()) && ((ledger->seq() - seq) < 25600)) + return; - mSeq = seq; + if ((ledger->seq() <= seq) && ((seq - ledger->seq()) < 16)) + return; } + if (seq_.exchange(ledger->seq()) != seq) + return; + + JLOG(j_.debug()) << "Full order book update: " << seq << " to " + << ledger->seq(); + if (app_.config().PATH_SEARCH_MAX != 0) { if (app_.config().standalone()) update(ledger); else app_.getJobQueue().addJob( - jtUPDATE_PF, "OrderBookDB::update", [this, ledger]() { - update(ledger); - }); + jtUPDATE_PF, + "OrderBookDB::update: " + std::to_string(ledger->seq()), + [this, ledger]() { update(ledger); }); } } void OrderBookDB::update(std::shared_ptr const& ledger) { - hash_set seen; - OrderBookDB::IssueToOrderBook destMap; - OrderBookDB::IssueToOrderBook sourceMap; - hash_set XRPBooks; - - JLOG(j_.debug()) << "OrderBookDB::update>"; - if (app_.config().PATH_SEARCH_MAX == 0) + return; // pathfinding has been disabled + + // A newer full update job is pending + if (auto const seq = seq_.load(); seq > ledger->seq()) { - // pathfinding has been disabled + JLOG(j_.debug()) << "Eliding update for " << ledger->seq() + << " because of pending update to later " << seq; return; } + decltype(allBooks_) allBooks; + decltype(xrpBooks_) xrpBooks; + + allBooks.reserve(allBooks_.size()); + xrpBooks.reserve(xrpBooks_.size()); + + JLOG(j_.debug()) << "Beginning update (" << ledger->seq() << ")"; + // walk through the entire ledger looking for orderbook entries - int books = 0; + int cnt = 0; try { @@ -100,9 +103,8 @@ OrderBookDB::update(std::shared_ptr const& ledger) if (app_.isStopping()) { JLOG(j_.info()) - << "OrderBookDB::update exiting due to isStopping"; - std::lock_guard sl(mLock); - mSeq = 0; + << "Update halted because 
the process is stopping"; + seq_.store(0); return; } @@ -111,40 +113,38 @@ OrderBookDB::update(std::shared_ptr const& ledger) sle->getFieldH256(sfRootIndex) == sle->key()) { Book book; + book.in.currency = sle->getFieldH160(sfTakerPaysCurrency); book.in.account = sle->getFieldH160(sfTakerPaysIssuer); - book.out.account = sle->getFieldH160(sfTakerGetsIssuer); book.out.currency = sle->getFieldH160(sfTakerGetsCurrency); + book.out.account = sle->getFieldH160(sfTakerGetsIssuer); + + allBooks[book.in].insert(book.out); + + if (isXRP(book.out)) + xrpBooks.insert(book.in); - uint256 index = getBookBase(book); - if (seen.insert(index).second) - { - auto orderBook = std::make_shared(index, book); - sourceMap[book.in].push_back(orderBook); - destMap[book.out].push_back(orderBook); - if (isXRP(book.out)) - XRPBooks.insert(book.in); - ++books; - } + ++cnt; } } } catch (SHAMapMissingNode const& mn) { - JLOG(j_.info()) << "OrderBookDB::update: " << mn.what(); - std::lock_guard sl(mLock); - mSeq = 0; + JLOG(j_.info()) << "Missing node in " << ledger->seq() + << " during update: " << mn.what(); + seq_.store(0); return; } - JLOG(j_.debug()) << "OrderBookDB::update< " << books << " books found"; + JLOG(j_.debug()) << "Update completed (" << ledger->seq() << "): " << cnt + << " books found"; + { std::lock_guard sl(mLock); - - mXRPBooks.swap(XRPBooks); - mSourceMap.swap(sourceMap); - mDestMap.swap(destMap); + allBooks_.swap(allBooks); + xrpBooks_.swap(xrpBooks); } + app_.getLedgerMaster().newOrderBookDB(); } @@ -152,60 +152,50 @@ void OrderBookDB::addOrderBook(Book const& book) { bool toXRP = isXRP(book.out); + std::lock_guard sl(mLock); - if (toXRP) - { - // We don't want to search through all the to-XRP or from-XRP order - // books! 
- for (auto ob : mSourceMap[book.in]) - { - if (isXRP(ob->getCurrencyOut())) // also to XRP - return; - } - } - else - { - for (auto ob : mDestMap[book.out]) - { - if (ob->getCurrencyIn() == book.in.currency && - ob->getIssuerIn() == book.in.account) - { - return; - } - } - } - uint256 index = getBookBase(book); - auto orderBook = std::make_shared(index, book); + allBooks_[book.in].insert(book.out); - mSourceMap[book.in].push_back(orderBook); - mDestMap[book.out].push_back(orderBook); if (toXRP) - mXRPBooks.insert(book.in); + xrpBooks_.insert(book.in); } // return list of all orderbooks that want this issuerID and currencyID -OrderBook::List +std::vector OrderBookDB::getBooksByTakerPays(Issue const& issue) { - std::lock_guard sl(mLock); - auto it = mSourceMap.find(issue); - return it == mSourceMap.end() ? OrderBook::List() : it->second; + std::vector ret; + + { + std::lock_guard sl(mLock); + + if (auto it = allBooks_.find(issue); it != allBooks_.end()) + { + ret.reserve(it->second.size()); + + for (auto const& gets : it->second) + ret.push_back(Book(issue, gets)); + } + } + + return ret; } int OrderBookDB::getBookSize(Issue const& issue) { std::lock_guard sl(mLock); - auto it = mSourceMap.find(issue); - return it == mSourceMap.end() ? 0 : it->second.size(); + if (auto it = allBooks_.find(issue); it != allBooks_.end()) + return static_cast(it->second.size()); + return 0; } bool OrderBookDB::isBookToXRP(Issue const& issue) { std::lock_guard sl(mLock); - return mXRPBooks.count(issue) > 0; + return xrpBooks_.count(issue) > 0; } BookListeners::pointer @@ -247,63 +237,49 @@ OrderBookDB::processTxn( Json::Value const& jvObj) { std::lock_guard sl(mLock); - if (alTx.getResult() == tesSUCCESS) + + // For this particular transaction, maintain the set of unique + // subscriptions that have already published it. 
This prevents sending + // the transaction multiple times if it touches multiple ltOFFER + // entries for the same book, or if it touches multiple books and a + // single client has subscribed to those books. + hash_set havePublished; + + for (auto const& node : alTx.getMeta().getNodes()) { - // For this particular transaction, maintain the set of unique - // subscriptions that have already published it. This prevents sending - // the transaction multiple times if it touches multiple ltOFFER - // entries for the same book, or if it touches multiple books and a - // single client has subscribed to those books. - hash_set havePublished; - - // Check if this is an offer or an offer cancel or a payment that - // consumes an offer. - // Check to see what the meta looks like. - for (auto& node : alTx.getMeta()->getNodes()) + try { - try + if (node.getFieldU16(sfLedgerEntryType) == ltOFFER) { - if (node.getFieldU16(sfLedgerEntryType) == ltOFFER) - { - SField const* field = nullptr; - - // We need a field that contains the TakerGets and TakerPays - // parameters. 
- if (node.getFName() == sfModifiedNode) - field = &sfPreviousFields; - else if (node.getFName() == sfCreatedNode) - field = &sfNewFields; - else if (node.getFName() == sfDeletedNode) - field = &sfFinalFields; - - if (field) + auto process = [&, this](SField const& field) { + if (auto data = dynamic_cast( + node.peekAtPField(field)); + data && data->isFieldPresent(sfTakerPays) && + data->isFieldPresent(sfTakerGets)) { - auto data = dynamic_cast( - node.peekAtPField(*field)); - - if (data && data->isFieldPresent(sfTakerPays) && - data->isFieldPresent(sfTakerGets)) - { - // determine the OrderBook - Book b{ - data->getFieldAmount(sfTakerGets).issue(), - data->getFieldAmount(sfTakerPays).issue()}; - - auto listeners = getBookListeners(b); - if (listeners) - { - listeners->publish(jvObj, havePublished); - } - } + auto listeners = getBookListeners( + {data->getFieldAmount(sfTakerGets).issue(), + data->getFieldAmount(sfTakerPays).issue()}); + if (listeners) + listeners->publish(jvObj, havePublished); } - } - } - catch (std::exception const&) - { - JLOG(j_.info()) - << "Fields not found in OrderBookDB::processTxn"; + }; + + // We need a field that contains the TakerGets and TakerPays + // parameters. 
+ if (node.getFName() == sfModifiedNode) + process(sfPreviousFields); + else if (node.getFName() == sfCreatedNode) + process(sfNewFields); + else if (node.getFName() == sfDeletedNode) + process(sfFinalFields); } } + catch (std::exception const& ex) + { + JLOG(j_.info()) + << "processTxn: field not found (" << ex.what() << ")"; + } } } diff --git a/src/ripple/app/ledger/OrderBookDB.h b/src/ripple/app/ledger/OrderBookDB.h index 3b6939013a3..ea7c60c5f5b 100644 --- a/src/ripple/app/ledger/OrderBookDB.h +++ b/src/ripple/app/ledger/OrderBookDB.h @@ -23,7 +23,6 @@ #include #include #include -#include #include namespace ripple { @@ -37,15 +36,13 @@ class OrderBookDB setup(std::shared_ptr const& ledger); void update(std::shared_ptr const& ledger); - void - invalidate(); void addOrderBook(Book const&); /** @return a list of all orderbooks that want this issuerID and currencyID. */ - OrderBook::List + std::vector getBooksByTakerPays(Issue const&); /** @return a count of all orderbooks that want this issuerID and @@ -68,22 +65,14 @@ class OrderBookDB const AcceptedLedgerTx& alTx, Json::Value const& jvObj); - using IssueToOrderBook = hash_map; - private: - void - rawAddBook(Book const&); - Application& app_; - // by ci/ii - IssueToOrderBook mSourceMap; - - // by co/io - IssueToOrderBook mDestMap; + // Maps order books by "issue in" to "issue out": + hardened_hash_map> allBooks_; // does an order book to XRP exist - hash_set mXRPBooks; + hash_set xrpBooks_; std::recursive_mutex mLock; @@ -91,7 +80,7 @@ class OrderBookDB BookToListenersMap mListeners; - std::uint32_t mSeq; + std::atomic seq_; beast::Journal const j_; }; diff --git a/src/ripple/app/ledger/README.md b/src/ripple/app/ledger/README.md index d57e7044eca..cf7844856b5 100644 --- a/src/ripple/app/ledger/README.md +++ b/src/ripple/app/ledger/README.md @@ -162,7 +162,7 @@ There are also indirect peer queries. If there have been timeouts while acquiring ledger data then a server may issue indirect queries. 
In that case the server receiving the indirect query passes the query along to any of its peers that may have the requested data. This is important if the -network has a byzantine failure. If also helps protect the validation +network has a byzantine failure. It also helps protect the validation network. A validator may need to get a peer set from one of the other validators, and indirect queries improve the likelihood of success with that. @@ -487,4 +487,3 @@ ledger(s) for missing nodes in the back end node store --- # References # - diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index 979c1454410..d24c451a12f 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -33,7 +33,10 @@ #include #include +#include + #include +#include namespace ripple { @@ -57,15 +60,15 @@ enum { // Number of nodes to find initially , - missingNodesFind = 256 + missingNodesFind = 512 // Number of nodes to request for a reply , - reqNodesReply = 128 + reqNodesReply = 256 // Number of nodes to request blindly , - reqNodes = 8 + reqNodes = 12 }; // millisecond for each ledger timeout @@ -601,7 +604,7 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) tmBH.set_ledgerhash(hash_.begin(), hash_.size()); for (auto const& p : need) { - JLOG(journal_.warn()) << "Want: " << p.second; + JLOG(journal_.debug()) << "Want: " << p.second; if (!typeSet) { @@ -661,15 +664,15 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (reason != TriggerReason::reply) { // If we're querying blind, don't query deep - tmGL.set_querydepth(0); + tmGL.set_querydepth(1); } else if (peer && peer->isHighLatency()) { // If the peer has high latency, query extra deep - tmGL.set_querydepth(2); + tmGL.set_querydepth(3); } else - tmGL.set_querydepth(1); + tmGL.set_querydepth(2); // Get the state data first because it's the most likely to be useful // if we wind 
up abandoning this fetch. @@ -952,22 +955,23 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san) try { + auto const f = filter.get(); + for (auto const& node : packet.nodes()) { auto const nodeID = deserializeSHAMapNodeID(node.nodeid()); if (!nodeID) - { - san.incInvalid(); - return; - } + throw std::runtime_error("data does not properly deserialize"); if (nodeID->isRoot()) - san += map.addRootNode( - rootHash, makeSlice(node.nodedata()), filter.get()); + { + san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f); + } else - san += map.addKnownNode( - *nodeID, makeSlice(node.nodedata()), filter.get()); + { + san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f); + } if (!san.isGood()) { @@ -1120,19 +1124,19 @@ InboundLedger::processData( std::shared_ptr peer, protocol::TMLedgerData& packet) { - ScopedLockType sl(mtx_); - if (packet.type() == protocol::liBASE) { - if (packet.nodes_size() < 1) + if (packet.nodes().empty()) { - JLOG(journal_.warn()) << "Got empty header data"; + JLOG(journal_.warn()) << peer->id() << ": empty header data"; peer->charge(Resource::feeInvalidRequest); return -1; } SHAMapAddNode san; + ScopedLockType sl(mtx_); + try { if (!mHaveHeader) @@ -1177,13 +1181,18 @@ InboundLedger::processData( if ((packet.type() == protocol::liTX_NODE) || (packet.type() == protocol::liAS_NODE)) { - if (packet.nodes().size() == 0) + std::string type = packet.type() == protocol::liTX_NODE ? 
"liTX_NODE: " + : "liAS_NODE: "; + + if (packet.nodes().empty()) { - JLOG(journal_.info()) << "Got response with no nodes"; + JLOG(journal_.info()) << peer->id() << ": response with no nodes"; peer->charge(Resource::feeInvalidRequest); return -1; } + ScopedLockType sl(mtx_); + // Verify node IDs and data are complete for (auto const& node : packet.nodes()) { @@ -1198,14 +1207,10 @@ InboundLedger::processData( SHAMapAddNode san; receiveNode(packet, san); - if (packet.type() == protocol::liTX_NODE) - { - JLOG(journal_.debug()) << "Ledger TX node stats: " << san.get(); - } - else - { - JLOG(journal_.debug()) << "Ledger AS node stats: " << san.get(); - } + JLOG(journal_.debug()) + << "Ledger " + << ((packet.type() == protocol::liTX_NODE) ? "TX" : "AS") + << " node stats: " << san.get(); if (san.isUseful()) progress_ = true; @@ -1217,20 +1222,100 @@ InboundLedger::processData( return -1; } +namespace detail { +// Track the amount of useful data that each peer returns +struct PeerDataCounts +{ + // Map from peer to amount of useful the peer returned + std::unordered_map, int> counts; + // The largest amount of useful data that any peer returned + int maxCount = 0; + + // Update the data count for a peer + void + update(std::shared_ptr&& peer, int dataCount) + { + if (dataCount <= 0) + return; + maxCount = std::max(maxCount, dataCount); + auto i = counts.find(peer); + if (i == counts.end()) + { + counts.emplace(std::move(peer), dataCount); + return; + } + i->second = std::max(i->second, dataCount); + } + + // Prune all the peers that didn't return enough data. + void + prune() + { + // Remove all the peers that didn't return at least half as much data as + // the best peer + auto const thresh = maxCount / 2; + auto i = counts.begin(); + while (i != counts.end()) + { + if (i->second < thresh) + i = counts.erase(i); + else + ++i; + } + } + + // call F with the `peer` parameter with a random sample of at most n values + // of the counts vector. 
+ template + void + sampleN(std::size_t n, F&& f) + { + if (counts.empty()) + return; + + auto outFunc = [&f](auto&& v) { f(v.first); }; + std::minstd_rand rng{std::random_device{}()}; +#if _MSC_VER + std::vector, int>> s; + s.reserve(n); + std::sample( + counts.begin(), counts.end(), std::back_inserter(s), n, rng); + for (auto& v : s) + { + outFunc(v); + } +#else + std::sample( + counts.begin(), + counts.end(), + boost::make_function_output_iterator(outFunc), + n, + rng); +#endif + } +}; +} // namespace detail + /** Process pending TMLedgerData - Query the 'best' peer + Query the a random sample of the 'best' peers */ void InboundLedger::runData() { - std::shared_ptr chosenPeer; - int chosenPeerCount = -1; + // Maximum number of peers to request data from + constexpr std::size_t maxUsefulPeers = 6; - std::vector data; + decltype(mReceivedData) data; + + // Reserve some memory so the first couple iterations don't reallocate + data.reserve(8); + + detail::PeerDataCounts dataCounts; for (;;) { data.clear(); + { std::lock_guard sl(mReceivedDataLock); @@ -1243,24 +1328,22 @@ InboundLedger::runData() data.swap(mReceivedData); } - // Select the peer that gives us the most nodes that are useful, - // breaking ties in favor of the peer that responded first. 
for (auto& entry : data) { if (auto peer = entry.first.lock()) { int count = processData(peer, *(entry.second)); - if (count > chosenPeerCount) - { - chosenPeerCount = count; - chosenPeer = std::move(peer); - } + dataCounts.update(std::move(peer), count); } } } - if (chosenPeer) - trigger(chosenPeer, TriggerReason::reply); + // Select a random sample of the peers that gives us the most nodes that are + // useful + dataCounts.prune(); + dataCounts.sampleN(maxUsefulPeers, [&](std::shared_ptr const& peer) { + trigger(peer, TriggerReason::reply); + }); } Json::Value diff --git a/src/ripple/app/ledger/impl/InboundLedgers.cpp b/src/ripple/app/ledger/impl/InboundLedgers.cpp index 76681ea0a9d..7ee49b4547a 100644 --- a/src/ripple/app/ledger/impl/InboundLedgers.cpp +++ b/src/ripple/app/ledger/impl/InboundLedgers.cpp @@ -74,6 +74,12 @@ class InboundLedgersImp : public InboundLedgers reason != InboundLedger::Reason::SHARD || (seq != 0 && app_.getShardStore())); + // probably not the right rule + if (app_.getOPs().isNeedNetworkLedger() && + (reason != InboundLedger::Reason::GENERIC) && + (reason != InboundLedger::Reason::CONSENSUS)) + return {}; + bool isNew = true; std::shared_ptr inbound; { @@ -82,6 +88,7 @@ class InboundLedgersImp : public InboundLedgers { return {}; } + auto it = mLedgers.find(hash); if (it != mLedgers.end()) { diff --git a/src/ripple/app/ledger/impl/InboundTransactions.cpp b/src/ripple/app/ledger/impl/InboundTransactions.cpp index 7bccf26aa46..7a863bce16b 100644 --- a/src/ripple/app/ledger/impl/InboundTransactions.cpp +++ b/src/ripple/app/ledger/impl/InboundTransactions.cpp @@ -71,6 +71,7 @@ class InboundTransactionsImp : public InboundTransactions , m_zeroSet(m_map[uint256()]) , m_gotSet(std::move(gotSet)) , m_peerSetBuilder(std::move(peerSetBuilder)) + , j_(app_.journal("InboundTransactions")) { m_zeroSet.mSet = std::make_shared( SHAMapType::TRANSACTION, uint256(), app_.getNodeFamily()); @@ -99,9 +100,7 @@ class InboundTransactionsImp : public 
InboundTransactions { std::lock_guard sl(mLock); - auto it = m_map.find(hash); - - if (it != m_map.end()) + if (auto it = m_map.find(hash); it != m_map.end()) { if (acquire) { @@ -140,11 +139,8 @@ class InboundTransactionsImp : public InboundTransactions { protocol::TMLedgerData& packet = *packet_ptr; - JLOG(app_.journal("InboundLedger").trace()) - << "Got data (" << packet.nodes().size() - << ") " - "for acquiring ledger: " - << hash; + JLOG(j_.trace()) << "Got data (" << packet.nodes().size() + << ") for acquiring ledger: " << hash; TransactionAcquire::pointer ta = getAcquire(hash); @@ -154,8 +150,9 @@ class InboundTransactionsImp : public InboundTransactions return; } - std::list nodeIDs; - std::list nodeData; + std::vector> data; + data.reserve(packet.nodes().size()); + for (auto const& node : packet.nodes()) { if (!node.has_nodeid() || !node.has_nodedata()) @@ -172,12 +169,10 @@ class InboundTransactionsImp : public InboundTransactions return; } - nodeIDs.emplace_back(*id); - nodeData.emplace_back( - node.nodedata().begin(), node.nodedata().end()); + data.emplace_back(std::make_pair(*id, makeSlice(node.nodedata()))); } - if (!ta->takeNodes(nodeIDs, nodeData, peer).isUseful()) + if (!ta->takeNodes(data, peer).isUseful()) peer->charge(Resource::feeUnwantedData); } @@ -262,6 +257,8 @@ class InboundTransactionsImp : public InboundTransactions std::function const&, bool)> m_gotSet; std::unique_ptr m_peerSetBuilder; + + beast::Journal j_; }; //------------------------------------------------------------------------------ diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 0dd0ba1eec0..3bae67f655d 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -261,8 +261,13 @@ LedgerMaster::getPublishedLedgerAge() std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch(); ret -= pubClose; ret = (ret > 0s) ? 
ret : 0s; + static std::chrono::seconds lastRet = -1s; - JLOG(m_journal.trace()) << "Published ledger age is " << ret.count(); + if (ret != lastRet) + { + JLOG(m_journal.trace()) << "Published ledger age is " << ret.count(); + lastRet = ret; + } return ret; } @@ -287,8 +292,13 @@ LedgerMaster::getValidatedLedgerAge() std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch(); ret -= valClose; ret = (ret > 0s) ? ret : 0s; + static std::chrono::seconds lastRet = -1s; - JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count(); + if (ret != lastRet) + { + JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count(); + lastRet = ret; + } return ret; } @@ -1483,12 +1493,14 @@ LedgerMaster::updatePaths() if (app_.getOPs().isNeedNetworkLedger()) { --mPathFindThread; + JLOG(m_journal.debug()) << "Need network ledger for updating paths"; return; } } while (!app_.getJobQueue().isStopping()) { + JLOG(m_journal.debug()) << "updatePaths running"; std::shared_ptr lastLedger; { std::lock_guard ml(m_mutex); @@ -1506,6 +1518,7 @@ LedgerMaster::updatePaths() else { // Nothing to do --mPathFindThread; + JLOG(m_journal.debug()) << "Nothing to do for updating paths"; return; } } @@ -1527,7 +1540,31 @@ LedgerMaster::updatePaths() try { - app_.getPathRequests().updateAll(lastLedger); + auto& pathRequests = app_.getPathRequests(); + { + std::lock_guard ml(m_mutex); + if (!pathRequests.requestsPending()) + { + --mPathFindThread; + JLOG(m_journal.debug()) + << "No path requests found. Nothing to do for updating " + "paths. " + << mPathFindThread << " jobs remaining"; + return; + } + } + JLOG(m_journal.debug()) << "Updating paths"; + pathRequests.updateAll(lastLedger); + + std::lock_guard ml(m_mutex); + if (!pathRequests.requestsPending()) + { + JLOG(m_journal.debug()) + << "No path requests left. 
No need for further updating " + "paths"; + --mPathFindThread; + return; + } } catch (SHAMapMissingNode const& mn) { @@ -1587,8 +1624,12 @@ LedgerMaster::newPFWork( const char* name, std::unique_lock&) { - if (mPathFindThread < 2) + if (!app_.isStopping() && mPathFindThread < 2 && + app_.getPathRequests().requestsPending()) { + JLOG(m_journal.debug()) + << "newPFWork: Creating job. path find threads: " + << mPathFindThread; if (app_.getJobQueue().addJob( jtUPDATE_PF, name, [this]() { updatePaths(); })) { @@ -1828,12 +1869,6 @@ LedgerMaster::setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV) mCompleteLedgers.insert(range(minV, maxV)); } -void -LedgerMaster::tune(int size, std::chrono::seconds age) -{ - mLedgerHistory.tune(size, age); -} - void LedgerMaster::sweep() { @@ -2074,7 +2109,7 @@ LedgerMaster::doAdvance(std::unique_lock& sl) { JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size() << " ledgers to publish"; - for (auto ledger : pubLedgers) + for (auto const& ledger : pubLedgers) { { ScopedUnlock sul{sl}; diff --git a/src/ripple/app/ledger/impl/TransactionAcquire.cpp b/src/ripple/app/ledger/impl/TransactionAcquire.cpp index 7d958cba869..24a03a16ffb 100644 --- a/src/ripple/app/ledger/impl/TransactionAcquire.cpp +++ b/src/ripple/app/ledger/impl/TransactionAcquire.cpp @@ -65,7 +65,7 @@ TransactionAcquire::done() if (failed_) { - JLOG(journal_.warn()) << "Failed to acquire TX set " << hash_; + JLOG(journal_.debug()) << "Failed to acquire TX set " << hash_; } else { @@ -176,8 +176,7 @@ TransactionAcquire::trigger(std::shared_ptr const& peer) SHAMapAddNode TransactionAcquire::takeNodes( - const std::list& nodeIDs, - const std::list& data, + std::vector> const& data, std::shared_ptr const& peer) { ScopedLockType sl(mtx_); @@ -196,24 +195,20 @@ TransactionAcquire::takeNodes( try { - if (nodeIDs.empty()) + if (data.empty()) return SHAMapAddNode::invalid(); - std::list::const_iterator nodeIDit = nodeIDs.begin(); - std::list::const_iterator 
nodeDatait = data.begin(); ConsensusTransSetSF sf(app_, app_.getTempNodeCache()); - while (nodeIDit != nodeIDs.end()) + for (auto const& d : data) { - if (nodeIDit->isRoot()) + if (d.first.isRoot()) { if (mHaveRoot) JLOG(journal_.debug()) << "Got root TXS node, already have it"; else if (!mMap->addRootNode( - SHAMapHash{hash_}, - makeSlice(*nodeDatait), - nullptr) + SHAMapHash{hash_}, d.second, nullptr) .isGood()) { JLOG(journal_.warn()) << "TX acquire got bad root node"; @@ -221,24 +216,22 @@ TransactionAcquire::takeNodes( else mHaveRoot = true; } - else if (!mMap->addKnownNode(*nodeIDit, makeSlice(*nodeDatait), &sf) - .isGood()) + else if (!mMap->addKnownNode(d.first, d.second, &sf).isGood()) { JLOG(journal_.warn()) << "TX acquire got bad non-root node"; return SHAMapAddNode::invalid(); } - - ++nodeIDit; - ++nodeDatait; } trigger(peer); progress_ = true; return SHAMapAddNode::useful(); } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(journal_.error()) << "Peer sends us junky transaction node data"; + JLOG(journal_.error()) + << "Peer " << peer->id() + << " sent us junky transaction node data: " << ex.what(); return SHAMapAddNode::invalid(); } } diff --git a/src/ripple/app/ledger/impl/TransactionAcquire.h b/src/ripple/app/ledger/impl/TransactionAcquire.h index 611448a444e..3863868fae0 100644 --- a/src/ripple/app/ledger/impl/TransactionAcquire.h +++ b/src/ripple/app/ledger/impl/TransactionAcquire.h @@ -44,8 +44,7 @@ class TransactionAcquire final SHAMapAddNode takeNodes( - const std::list& IDs, - const std::list& data, + std::vector> const& data, std::shared_ptr const&); void diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 933c4939118..2256ee31ba9 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -225,9 +225,11 @@ class ApplicationImp : public Application, public BasicApp boost::asio::signal_set m_signals; - std::condition_variable cv_; - mutable 
std::mutex mut_; - bool isTimeToStop = false; + // Once we get C++20, we could use `std::atomic_flag` for `isTimeToStop` + // and eliminate the need for the condition variable and the mutex. + std::condition_variable stoppingCondition_; + mutable std::mutex stoppingMutex_; + std::atomic isTimeToStop = false; std::atomic checkSigs_; @@ -960,110 +962,9 @@ class ApplicationImp : public Application, public BasicApp << "' took " << elapsed.count() << " seconds."; } - // tune caches - using namespace std::chrono; - - m_ledgerMaster->tune( - config_->getValueFor(SizedItem::ledgerSize), - seconds{config_->getValueFor(SizedItem::ledgerAge)}); - return true; } - //-------------------------------------------------------------------------- - - // Called to indicate shutdown. - void - stop() - { - JLOG(m_journal.debug()) << "Application stopping"; - - m_io_latency_sampler.cancel_async(); - - // VFALCO Enormous hack, we have to force the probe to cancel - // before we stop the io_service queue or else it never - // unblocks in its destructor. The fix is to make all - // io_objects gracefully handle exit so that we can - // naturally return from io_service::run() instead of - // forcing a call to io_service::stop() - m_io_latency_sampler.cancel(); - - m_resolver->stop_async(); - - // NIKB This is a hack - we need to wait for the resolver to - // stop. before we stop the io_server_queue or weird - // things will happen. - m_resolver->stop(); - - { - boost::system::error_code ec; - sweepTimer_.cancel(ec); - if (ec) - { - JLOG(m_journal.error()) - << "Application: sweepTimer cancel error: " << ec.message(); - } - - ec.clear(); - entropyTimer_.cancel(ec); - if (ec) - { - JLOG(m_journal.error()) - << "Application: entropyTimer cancel error: " - << ec.message(); - } - } - // Make sure that any waitHandlers pending in our timers are done - // before we declare ourselves stopped. 
- using namespace std::chrono_literals; - waitHandlerCounter_.join("Application", 1s, m_journal); - - mValidations.flush(); - - validatorSites_->stop(); - - // TODO Store manifests in manifests.sqlite instead of wallet.db - validatorManifests_->save( - getWalletDB(), - "ValidatorManifests", - [this](PublicKey const& pubKey) { - return validators().listed(pubKey); - }); - - publisherManifests_->save( - getWalletDB(), - "PublisherManifests", - [this](PublicKey const& pubKey) { - return validators().trustedPublisher(pubKey); - }); - - // The order of these stop calls is delicate. - // Re-ordering them risks undefined behavior. - m_loadManager->stop(); - m_shaMapStore->stop(); - m_jobQueue->stop(); - if (shardArchiveHandler_) - shardArchiveHandler_->stop(); - if (overlay_) - overlay_->stop(); - if (shardStore_) - shardStore_->stop(); - grpcServer_->stop(); - m_networkOPs->stop(); - serverHandler_->stop(); - m_ledgerReplayer->stop(); - m_inboundTransactions->stop(); - m_inboundLedgers->stop(); - ledgerCleaner_->stop(); - if (reportingETL_) - reportingETL_->stop(); - if (auto pg = dynamic_cast( - &*mRelationalDBInterface)) - pg->stop(); - m_nodeStore->stop(); - perfLog_->stop(); - } - //-------------------------------------------------------------------------- // // PropertyStream @@ -1636,27 +1537,102 @@ ApplicationImp::run() } { - std::unique_lock lk{mut_}; - cv_.wait(lk, [this] { return isTimeToStop; }); + std::unique_lock lk{stoppingMutex_}; + stoppingCondition_.wait(lk, [this] { return isTimeToStop.load(); }); + } + + JLOG(m_journal.debug()) << "Application stopping"; + + m_io_latency_sampler.cancel_async(); + + // VFALCO Enormous hack, we have to force the probe to cancel + // before we stop the io_service queue or else it never + // unblocks in its destructor. 
The fix is to make all + // io_objects gracefully handle exit so that we can + // naturally return from io_service::run() instead of + // forcing a call to io_service::stop() + m_io_latency_sampler.cancel(); + + m_resolver->stop_async(); + + // NIKB This is a hack - we need to wait for the resolver to + // stop. before we stop the io_server_queue or weird + // things will happen. + m_resolver->stop(); + + { + boost::system::error_code ec; + sweepTimer_.cancel(ec); + if (ec) + { + JLOG(m_journal.error()) + << "Application: sweepTimer cancel error: " << ec.message(); + } + + ec.clear(); + entropyTimer_.cancel(ec); + if (ec) + { + JLOG(m_journal.error()) + << "Application: entropyTimer cancel error: " << ec.message(); + } } - JLOG(m_journal.info()) << "Received shutdown request"; - stop(); + // Make sure that any waitHandlers pending in our timers are done + // before we declare ourselves stopped. + using namespace std::chrono_literals; + + waitHandlerCounter_.join("Application", 1s, m_journal); + + mValidations.flush(); + + validatorSites_->stop(); + + // TODO Store manifests in manifests.sqlite instead of wallet.db + validatorManifests_->save( + getWalletDB(), "ValidatorManifests", [this](PublicKey const& pubKey) { + return validators().listed(pubKey); + }); + + publisherManifests_->save( + getWalletDB(), "PublisherManifests", [this](PublicKey const& pubKey) { + return validators().trustedPublisher(pubKey); + }); + + // The order of these stop calls is delicate. + // Re-ordering them risks undefined behavior. 
+ m_loadManager->stop(); + m_shaMapStore->stop(); + m_jobQueue->stop(); + if (shardArchiveHandler_) + shardArchiveHandler_->stop(); + if (overlay_) + overlay_->stop(); + if (shardStore_) + shardStore_->stop(); + grpcServer_->stop(); + m_networkOPs->stop(); + serverHandler_->stop(); + m_ledgerReplayer->stop(); + m_inboundTransactions->stop(); + m_inboundLedgers->stop(); + ledgerCleaner_->stop(); + if (reportingETL_) + reportingETL_->stop(); + if (auto pg = dynamic_cast( + &*mRelationalDBInterface)) + pg->stop(); + m_nodeStore->stop(); + perfLog_->stop(); + JLOG(m_journal.info()) << "Done."; } void ApplicationImp::signalStop() { - // Unblock the main thread (which is sitting in run()). - // When we get C++20 this can use std::latch. - std::lock_guard lk{mut_}; - - if (!isTimeToStop) - { - isTimeToStop = true; - cv_.notify_all(); - } + if (!isTimeToStop.exchange(true)) + stoppingCondition_.notify_all(); } bool @@ -1674,8 +1650,7 @@ ApplicationImp::checkSigs(bool check) bool ApplicationImp::isStopping() const { - std::lock_guard lk{mut_}; - return isTimeToStop; + return isTimeToStop.load(); } int diff --git a/src/ripple/app/misc/CanonicalTXSet.h b/src/ripple/app/misc/CanonicalTXSet.h index d0dfd97e39d..3ca2179448f 100644 --- a/src/ripple/app/misc/CanonicalTXSet.h +++ b/src/ripple/app/misc/CanonicalTXSet.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_APP_MISC_CANONICALTXSET_H_INCLUDED #define RIPPLE_APP_MISC_CANONICALTXSET_H_INCLUDED +#include #include #include #include @@ -34,7 +35,7 @@ namespace ripple { */ // VFALCO TODO rename to SortedTxSet -class CanonicalTXSet +class CanonicalTXSet : public CountedObject { private: class Key diff --git a/src/ripple/app/misc/Manifest.h b/src/ripple/app/misc/Manifest.h index 5f2b9619f69..a1658428cb7 100644 --- a/src/ripple/app/misc/Manifest.h +++ b/src/ripple/app/misc/Manifest.h @@ -24,7 +24,9 @@ #include #include #include + #include +#include #include namespace ripple { @@ -223,9 +225,8 @@ class DatabaseCon; class ManifestCache { private: - 
beast::Journal mutable j_; - std::mutex apply_mutex_; - std::mutex mutable read_mutex_; + beast::Journal j_; + std::shared_mutex mutable mutex_; /** Active manifests stored by master public key. */ hash_map map_; @@ -378,8 +379,10 @@ class ManifestCache /** Invokes the callback once for every populated manifest. - @note Undefined behavior results when calling ManifestCache members from - within the callback + @note Do not call ManifestCache member functions from within the + callback. This can re-lock the mutex from the same thread, which is UB. + @note Do not write ManifestCache member variables from within the + callback. This can lead to data races. @param f Function called for each manifest @@ -391,7 +394,7 @@ class ManifestCache void for_each_manifest(Function&& f) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock lock{mutex_}; for (auto const& [_, manifest] : map_) { (void)_; @@ -401,8 +404,10 @@ class ManifestCache /** Invokes the callback once for every populated manifest. - @note Undefined behavior results when calling ManifestCache members from - within the callback + @note Do not call ManifestCache member functions from within the + callback. This can re-lock the mutex from the same thread, which is UB. + @note Do not write ManifestCache member variables from + within the callback. This can lead to data races. 
@param pf Pre-function called with the maximum number of times f will be called (useful for memory allocations) @@ -417,7 +422,7 @@ class ManifestCache void for_each_manifest(PreFun&& pf, EachFun&& f) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock lock{mutex_}; pf(map_.size()); for (auto const& [_, manifest] : map_) { diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 2b9c5f316bd..e5dd5765d9a 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -445,9 +445,9 @@ class NetworkOPsImp final : public NetworkOPs pubLedger(std::shared_ptr const& lpAccepted) override; void pubProposedTransaction( - std::shared_ptr const& lpCurrent, - std::shared_ptr const& stTxn, - TER terResult) override; + std::shared_ptr const& ledger, + std::shared_ptr const& transaction, + TER result) override; void pubValidation(std::shared_ptr const& val) override; @@ -612,20 +612,26 @@ class NetworkOPsImp final : public NetworkOPs Json::Value transJson( - const STTx& stTxn, - TER terResult, - bool bValidated, - std::shared_ptr const& lpCurrent); + const STTx& transaction, + TER result, + bool validated, + std::shared_ptr const& ledger); void pubValidatedTransaction( - std::shared_ptr const& alAccepted, - const AcceptedLedgerTx& alTransaction); + std::shared_ptr const& ledger, + AcceptedLedgerTx const& transaction); + void pubAccountTransaction( - std::shared_ptr const& lpCurrent, - const AcceptedLedgerTx& alTransaction, - bool isAccepted); + std::shared_ptr const& ledger, + AcceptedLedgerTx const& transaction); + + void + pubProposedAccountTransaction( + std::shared_ptr const& ledger, + std::shared_ptr const& transaction, + TER result); void pubServer(); @@ -2643,11 +2649,11 @@ NetworkOPsImp::getLedgerFetchInfo() void NetworkOPsImp::pubProposedTransaction( - std::shared_ptr const& lpCurrent, - std::shared_ptr const& stTxn, - TER terResult) + std::shared_ptr const& ledger, + std::shared_ptr const& 
transaction, + TER result) { - Json::Value jvObj = transJson(*stTxn, terResult, false, lpCurrent); + Json::Value jvObj = transJson(*transaction, result, false, ledger); { std::lock_guard sl(mSubLock); @@ -2668,10 +2674,8 @@ NetworkOPsImp::pubProposedTransaction( } } } - AcceptedLedgerTx alt( - lpCurrent, stTxn, terResult, app_.accountIDCache(), app_.logs()); - JLOG(m_journal.trace()) << "pubProposed: " << alt.getJson(); - pubAccountTransaction(lpCurrent, alt, false); + + pubProposedAccountTransaction(ledger, transaction, result); } void @@ -2846,9 +2850,13 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) lpAccepted->info().hash, alpAccepted); } + assert(alpAccepted->getLedger().get() == lpAccepted.get()); + { JLOG(m_journal.debug()) - << "Publishing ledger = " << lpAccepted->info().seq; + << "Publishing ledger " << lpAccepted->info().seq << " " + << lpAccepted->info().hash; + std::lock_guard sl(mSubLock); if (!mStreamMaps[sLedger].empty()) @@ -2868,7 +2876,7 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) jvObj[jss::reserve_inc] = lpAccepted->fees().increment.jsonClipped(); - jvObj[jss::txn_count] = Json::UInt(alpAccepted->getTxnCount()); + jvObj[jss::txn_count] = Json::UInt(alpAccepted->size()); if (mMode >= OperatingMode::SYNCING) { @@ -2882,10 +2890,6 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) InfoSub::pointer p = it->second.lock(); if (p) { - JLOG(m_journal.debug()) - << "Publishing ledger = " << lpAccepted->info().seq - << " : consumer = " << p->getConsumer() - << " : obj = " << jvObj; p->send(jvObj, true); ++it; } @@ -2917,9 +2921,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) } // Don't lock since pubAcceptedTransaction is locking. 
- for (auto const& [_, accTx] : alpAccepted->getMap()) + for (auto const& accTx : *alpAccepted) { - (void)_; JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson(); pubValidatedTransaction(lpAccepted, *accTx); } @@ -2969,26 +2972,26 @@ NetworkOPsImp::getLocalTxCount() // transactions. Json::Value NetworkOPsImp::transJson( - const STTx& stTxn, - TER terResult, - bool bValidated, - std::shared_ptr const& lpCurrent) + const STTx& transaction, + TER result, + bool validated, + std::shared_ptr const& ledger) { Json::Value jvObj(Json::objectValue); std::string sToken; std::string sHuman; - transResultInfo(terResult, sToken, sHuman); + transResultInfo(result, sToken, sHuman); jvObj[jss::type] = "transaction"; - jvObj[jss::transaction] = stTxn.getJson(JsonOptions::none); + jvObj[jss::transaction] = transaction.getJson(JsonOptions::none); - if (bValidated) + if (validated) { - jvObj[jss::ledger_index] = lpCurrent->info().seq; - jvObj[jss::ledger_hash] = to_string(lpCurrent->info().hash); + jvObj[jss::ledger_index] = ledger->info().seq; + jvObj[jss::ledger_hash] = to_string(ledger->info().hash); jvObj[jss::transaction][jss::date] = - lpCurrent->info().closeTime.time_since_epoch().count(); + ledger->info().closeTime.time_since_epoch().count(); jvObj[jss::validated] = true; // WRITEME: Put the account next seq here @@ -2996,24 +2999,24 @@ NetworkOPsImp::transJson( else { jvObj[jss::validated] = false; - jvObj[jss::ledger_current_index] = lpCurrent->info().seq; + jvObj[jss::ledger_current_index] = ledger->info().seq; } - jvObj[jss::status] = bValidated ? "closed" : "proposed"; + jvObj[jss::status] = validated ? 
"closed" : "proposed"; jvObj[jss::engine_result] = sToken; - jvObj[jss::engine_result_code] = terResult; + jvObj[jss::engine_result_code] = result; jvObj[jss::engine_result_message] = sHuman; - if (stTxn.getTxnType() == ttOFFER_CREATE) + if (transaction.getTxnType() == ttOFFER_CREATE) { - auto const account = stTxn.getAccountID(sfAccount); - auto const amount = stTxn.getFieldAmount(sfTakerGets); + auto const account = transaction.getAccountID(sfAccount); + auto const amount = transaction.getFieldAmount(sfTakerGets); // If the offer create is not self funded then add the owner balance if (account != amount.issue().account) { auto const ownerFunds = accountFunds( - *lpCurrent, + *ledger, account, amount, fhIGNORE_FREEZE, @@ -3027,17 +3030,18 @@ NetworkOPsImp::transJson( void NetworkOPsImp::pubValidatedTransaction( - std::shared_ptr const& alAccepted, - const AcceptedLedgerTx& alTx) + std::shared_ptr const& ledger, + const AcceptedLedgerTx& transaction) { - std::shared_ptr stTxn = alTx.getTxn(); - Json::Value jvObj = transJson(*stTxn, alTx.getResult(), true, alAccepted); + auto const& stTxn = transaction.getTxn(); + + Json::Value jvObj = + transJson(*stTxn, transaction.getResult(), true, ledger); - if (auto const txMeta = alTx.getMeta()) { - jvObj[jss::meta] = txMeta->getJson(JsonOptions::none); - RPC::insertDeliveredAmount( - jvObj[jss::meta], *alAccepted, stTxn, *txMeta); + auto const& meta = transaction.getMeta(); + jvObj[jss::meta] = meta.getJson(JsonOptions::none); + RPC::insertDeliveredAmount(jvObj[jss::meta], *ledger, stTxn, meta); } { @@ -3072,32 +3076,31 @@ NetworkOPsImp::pubValidatedTransaction( it = mStreamMaps[sRTTransactions].erase(it); } } - app_.getOrderBookDB().processTxn(alAccepted, alTx, jvObj); - pubAccountTransaction(alAccepted, alTx, true); + + if (transaction.getResult() == tesSUCCESS) + app_.getOrderBookDB().processTxn(ledger, transaction, jvObj); + + pubAccountTransaction(ledger, transaction); } void NetworkOPsImp::pubAccountTransaction( - 
std::shared_ptr const& lpCurrent, - const AcceptedLedgerTx& alTx, - bool bAccepted) + std::shared_ptr const& ledger, + AcceptedLedgerTx const& transaction) { hash_set notify; int iProposed = 0; int iAccepted = 0; std::vector accountHistoryNotify; - auto const currLedgerSeq = lpCurrent->seq(); + auto const currLedgerSeq = ledger->seq(); { std::lock_guard sl(mSubLock); - if (!bAccepted && mSubRTAccount.empty()) - return; - - if (!mSubAccount.empty() || (!mSubRTAccount.empty()) || + if (!mSubAccount.empty() || !mSubRTAccount.empty() || !mSubAccountHistory.empty()) { - for (auto const& affectedAccount : alTx.getAffected()) + for (auto const& affectedAccount : transaction.getAffected()) { if (auto simiIt = mSubRTAccount.find(affectedAccount); simiIt != mSubRTAccount.end()) @@ -3119,80 +3122,140 @@ NetworkOPsImp::pubAccountTransaction( } } - if (bAccepted) + if (auto simiIt = mSubAccount.find(affectedAccount); + simiIt != mSubAccount.end()) { - if (auto simiIt = mSubAccount.find(affectedAccount); - simiIt != mSubAccount.end()) + auto it = simiIt->second.begin(); + while (it != simiIt->second.end()) { - auto it = simiIt->second.begin(); - while (it != simiIt->second.end()) - { - InfoSub::pointer p = it->second.lock(); + InfoSub::pointer p = it->second.lock(); - if (p) - { - notify.insert(p); - ++it; - ++iAccepted; - } - else - it = simiIt->second.erase(it); + if (p) + { + notify.insert(p); + ++it; + ++iAccepted; } + else + it = simiIt->second.erase(it); } + } - if (auto histoIt = mSubAccountHistory.find(affectedAccount); - histoIt != mSubAccountHistory.end()) + if (auto histoIt = mSubAccountHistory.find(affectedAccount); + histoIt != mSubAccountHistory.end()) + { + auto& subs = histoIt->second; + auto it = subs.begin(); + while (it != subs.end()) { - auto& subs = histoIt->second; - auto it = subs.begin(); - while (it != subs.end()) + SubAccountHistoryInfoWeak const& info = it->second; + if (currLedgerSeq <= info.index_->separationLedgerSeq_) { - SubAccountHistoryInfoWeak 
const& info = it->second; - if (currLedgerSeq <= - info.index_->separationLedgerSeq_) - { - ++it; - continue; - } + ++it; + continue; + } - if (auto isSptr = info.sinkWptr_.lock(); isSptr) - { - accountHistoryNotify.emplace_back( - SubAccountHistoryInfo{isSptr, info.index_}); - ++it; - } - else - { - it = subs.erase(it); - } + if (auto isSptr = info.sinkWptr_.lock(); isSptr) + { + accountHistoryNotify.emplace_back( + SubAccountHistoryInfo{isSptr, info.index_}); + ++it; + } + else + { + it = subs.erase(it); } - if (subs.empty()) - mSubAccountHistory.erase(histoIt); } + if (subs.empty()) + mSubAccountHistory.erase(histoIt); } } } } JLOG(m_journal.trace()) - << "pubAccountTransaction:" - << " iProposed=" << iProposed << " iAccepted=" << iAccepted; + << "pubAccountTransaction: " + << "proposed=" << iProposed << ", accepted=" << iAccepted; if (!notify.empty() || !accountHistoryNotify.empty()) { - std::shared_ptr stTxn = alTx.getTxn(); + auto const& stTxn = transaction.getTxn(); + Json::Value jvObj = - transJson(*stTxn, alTx.getResult(), bAccepted, lpCurrent); + transJson(*stTxn, transaction.getResult(), true, ledger); - if (alTx.isApplied()) { - if (auto const txMeta = alTx.getMeta()) + auto const& meta = transaction.getMeta(); + + jvObj[jss::meta] = meta.getJson(JsonOptions::none); + RPC::insertDeliveredAmount(jvObj[jss::meta], *ledger, stTxn, meta); + } + + for (InfoSub::ref isrListener : notify) + isrListener->send(jvObj, true); + + assert(!jvObj.isMember(jss::account_history_tx_stream)); + for (auto& info : accountHistoryNotify) + { + auto& index = info.index_; + if (index->forwardTxIndex_ == 0 && !index->haveHistorical_) + jvObj[jss::account_history_tx_first] = true; + jvObj[jss::account_history_tx_index] = index->forwardTxIndex_++; + info.sink_->send(jvObj, true); + } + } +} + +void +NetworkOPsImp::pubProposedAccountTransaction( + std::shared_ptr const& ledger, + std::shared_ptr const& tx, + TER result) +{ + hash_set notify; + int iProposed = 0; + + std::vector 
accountHistoryNotify; + + { + std::lock_guard sl(mSubLock); + + if (mSubRTAccount.empty()) + return; + + if (!mSubAccount.empty() || !mSubRTAccount.empty() || + !mSubAccountHistory.empty()) + { + for (auto const& affectedAccount : tx->getMentionedAccounts()) { - jvObj[jss::meta] = txMeta->getJson(JsonOptions::none); - RPC::insertDeliveredAmount( - jvObj[jss::meta], *lpCurrent, stTxn, *txMeta); + if (auto simiIt = mSubRTAccount.find(affectedAccount); + simiIt != mSubRTAccount.end()) + { + auto it = simiIt->second.begin(); + + while (it != simiIt->second.end()) + { + InfoSub::pointer p = it->second.lock(); + + if (p) + { + notify.insert(p); + ++it; + ++iProposed; + } + else + it = simiIt->second.erase(it); + } + } } } + } + + JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed; + + if (!notify.empty() || !accountHistoryNotify.empty()) + { + Json::Value jvObj = transJson(*tx, result, false, ledger); for (InfoSub::ref isrListener : notify) isrListener->send(jvObj, true); diff --git a/src/ripple/app/misc/NetworkOPs.h b/src/ripple/app/misc/NetworkOPs.h index 1cf53f12631..d53127ed3b6 100644 --- a/src/ripple/app/misc/NetworkOPs.h +++ b/src/ripple/app/misc/NetworkOPs.h @@ -255,9 +255,9 @@ class NetworkOPs : public InfoSub::Source pubLedger(std::shared_ptr const& lpAccepted) = 0; virtual void pubProposedTransaction( - std::shared_ptr const& lpCurrent, - std::shared_ptr const& stTxn, - TER terResult) = 0; + std::shared_ptr const& ledger, + std::shared_ptr const& transaction, + TER result) = 0; virtual void pubValidation(std::shared_ptr const& val) = 0; diff --git a/src/ripple/app/misc/OrderBook.h b/src/ripple/app/misc/OrderBook.h deleted file mode 100644 index fb96bd5c00c..00000000000 --- a/src/ripple/app/misc/OrderBook.h +++ /dev/null @@ -1,87 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_MISC_ORDERBOOK_H_INCLUDED -#define RIPPLE_APP_MISC_ORDERBOOK_H_INCLUDED - -namespace ripple { - -/** Describes a serialized ledger entry for an order book. */ -class OrderBook -{ -public: - using pointer = std::shared_ptr; - using ref = std::shared_ptr const&; - using List = std::vector; - - /** Construct from a currency specification. - - @param index ??? - @param book in and out currency/issuer pairs. - */ - // VFALCO NOTE what is the meaning of the index parameter? 
- OrderBook(uint256 const& base, Book const& book) - : mBookBase(base), mBook(book) - { - } - - uint256 const& - getBookBase() const - { - return mBookBase; - } - - Book const& - book() const - { - return mBook; - } - - Currency const& - getCurrencyIn() const - { - return mBook.in.currency; - } - - Currency const& - getCurrencyOut() const - { - return mBook.out.currency; - } - - AccountID const& - getIssuerIn() const - { - return mBook.in.account; - } - - AccountID const& - getIssuerOut() const - { - return mBook.out.account; - } - -private: - uint256 const mBookBase; - Book const mBook; -}; - -} // namespace ripple - -#endif diff --git a/src/ripple/app/misc/TxQ.h b/src/ripple/app/misc/TxQ.h index 0c642c1d955..7e004ec7267 100644 --- a/src/ripple/app/misc/TxQ.h +++ b/src/ripple/app/misc/TxQ.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -340,7 +341,7 @@ class TxQ in the queue. */ std::vector - getAccountTxs(AccountID const& account, ReadView const& view) const; + getAccountTxs(AccountID const& account) const; /** Returns information about all transactions currently in the queue. @@ -349,7 +350,7 @@ class TxQ in the queue. */ std::vector - getTxs(ReadView const& view) const; + getTxs() const; /** Summarize current fee metrics for the `fee` RPC command. @@ -575,6 +576,16 @@ class TxQ */ static constexpr int retriesAllowed = 10; + /** The hash of the parent ledger. + + This is used to pseudo-randomize the transaction order when + populating byFee_, by XORing it with the transaction hash (txID). + Using a single static and doing the XOR operation every time was + tested to be as fast or faster than storing the computed "sort key", + and obviously uses less memory. 
+ */ + static LedgerHash parentHashComp; + public: /// Constructor MaybeTx( @@ -621,22 +632,26 @@ class TxQ explicit OrderCandidates() = default; /** Sort @ref MaybeTx by `feeLevel` descending, then by - * transaction ID ascending + * pseudo-randomized transaction ID ascending * * The transaction queue is ordered such that transactions * paying a higher fee are in front of transactions paying * a lower fee, giving them an opportunity to be processed into * the open ledger first. Within transactions paying the same - * fee, order by the arbitrary but consistent transaction ID. - * This allows validators to build similar queues in the same - * order, and thus have more similar initial proposals. + * fee, order by the arbitrary but consistent pseudo-randomized + * transaction ID. The ID is pseudo-randomized by XORing it with + * the open ledger's parent hash, which is deterministic, but + * unpredictable. This allows validators to build similar queues + * in the same order, and thus have more similar initial + * proposals. * */ bool operator()(const MaybeTx& lhs, const MaybeTx& rhs) const { if (lhs.feeLevel == rhs.feeLevel) - return lhs.txID < rhs.txID; + return (lhs.txID ^ MaybeTx::parentHashComp) < + (rhs.txID ^ MaybeTx::parentHashComp); return lhs.feeLevel > rhs.feeLevel; } }; @@ -770,6 +785,14 @@ class TxQ */ std::optional maxSize_; +#if !NDEBUG + /** + parentHash_ checks that no unexpected ledger transitions + happen, and is only checked via debug asserts. + */ + LedgerHash parentHash_{beast::zero}; +#endif + /** Most queue operations are done under the master lock, but use this mutex for the RPC "fee" command, which isn't. 
*/ diff --git a/src/ripple/app/misc/impl/Manifest.cpp b/src/ripple/app/misc/impl/Manifest.cpp index 6df5dd6b53d..d5fcde19e3f 100644 --- a/src/ripple/app/misc/impl/Manifest.cpp +++ b/src/ripple/app/misc/impl/Manifest.cpp @@ -28,8 +28,11 @@ #include #include #include + #include + #include +#include #include namespace ripple { @@ -283,7 +286,7 @@ loadValidatorToken(std::vector const& blob) PublicKey ManifestCache::getSigningKey(PublicKey const& pk) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock lock{mutex_}; auto const iter = map_.find(pk); if (iter != map_.end() && !iter->second.revoked()) @@ -295,7 +298,7 @@ ManifestCache::getSigningKey(PublicKey const& pk) const PublicKey ManifestCache::getMasterKey(PublicKey const& pk) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock lock{mutex_}; if (auto const iter = signingToMasterKeys_.find(pk); iter != signingToMasterKeys_.end()) @@ -307,7 +310,7 @@ ManifestCache::getMasterKey(PublicKey const& pk) const std::optional ManifestCache::getSequence(PublicKey const& pk) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock lock{mutex_}; auto const iter = map_.find(pk); if (iter != map_.end() && !iter->second.revoked()) @@ -319,7 +322,7 @@ ManifestCache::getSequence(PublicKey const& pk) const std::optional ManifestCache::getDomain(PublicKey const& pk) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock lock{mutex_}; auto const iter = map_.find(pk); if (iter != map_.end() && !iter->second.revoked()) @@ -331,7 +334,7 @@ ManifestCache::getDomain(PublicKey const& pk) const std::optional ManifestCache::getManifest(PublicKey const& pk) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock lock{mutex_}; auto const iter = map_.find(pk); if (iter != map_.end() && !iter->second.revoked()) @@ -343,7 +346,7 @@ ManifestCache::getManifest(PublicKey const& pk) const bool ManifestCache::revoked(PublicKey const& pk) const { - std::lock_guard lock{read_mutex_}; + std::shared_lock 
lock{mutex_}; auto const iter = map_.find(pk); if (iter != map_.end()) @@ -355,86 +358,115 @@ ManifestCache::revoked(PublicKey const& pk) const ManifestDisposition ManifestCache::applyManifest(Manifest m) { - std::lock_guard applyLock{apply_mutex_}; - - // Before we spend time checking the signature, make sure the - // sequence number is newer than any we have. - auto const iter = map_.find(m.masterKey); - - if (iter != map_.end() && m.sequence <= iter->second.sequence) - { - // We received a manifest whose sequence number is not strictly greater - // than the one we already know about. This can happen in several cases - // including when we receive manifests from a peer who doesn't have the - // latest data. - if (auto stream = j_.debug()) - logMftAct( - stream, - "Stale", - m.masterKey, - m.sequence, - iter->second.sequence); - return ManifestDisposition::stale; - } - - // Now check the signature - if (!m.verify()) - { - if (auto stream = j_.warn()) - logMftAct(stream, "Invalid", m.masterKey, m.sequence); - return ManifestDisposition::invalid; - } - - // If the master key associated with a manifest is or might be compromised - // and is, therefore, no longer trustworthy. - // - // A manifest revocation essentially marks a manifest as compromised. By - // setting the sequence number to the highest value possible, the manifest - // is effectively neutered and cannot be superseded by a forged one. 
- bool const revoked = m.revoked(); - - if (auto stream = j_.warn(); stream && revoked) - logMftAct(stream, "Revoked", m.masterKey, m.sequence); - - std::lock_guard readLock{read_mutex_}; - - // Sanity check: the master key of this manifest should not be used as - // the ephemeral key of another manifest: - if (auto const x = signingToMasterKeys_.find(m.masterKey); - x != signingToMasterKeys_.end()) - { - JLOG(j_.warn()) << to_string(m) - << ": Master key already used as ephemeral key for " - << toBase58(TokenType::NodePublic, x->second); + // Check the manifest against the conditions that do not require a + // `unique_lock` (write lock) on the `mutex_`. Since the signature can be + // relatively expensive, the `checkSignature` parameter determines if the + // signature should be checked. Since `prewriteCheck` is run twice (see + // comment below), `checkSignature` only needs to be set to true on the + // first run. + auto prewriteCheck = + [this, &m](auto const& iter, bool checkSignature, auto const& lock) + -> std::optional { + assert(lock.owns_lock()); + (void)lock; // not used. parameter is present to ensure the mutex is + // locked when the lambda is called. + if (iter != map_.end() && m.sequence <= iter->second.sequence) + { + // We received a manifest whose sequence number is not strictly + // greater than the one we already know about. This can happen in + // several cases including when we receive manifests from a peer who + // doesn't have the latest data. 
+ if (auto stream = j_.debug()) + logMftAct( + stream, + "Stale", + m.masterKey, + m.sequence, + iter->second.sequence); + return ManifestDisposition::stale; + } - return ManifestDisposition::badMasterKey; - } + if (checkSignature && !m.verify()) + { + if (auto stream = j_.warn()) + logMftAct(stream, "Invalid", m.masterKey, m.sequence); + return ManifestDisposition::invalid; + } - if (!revoked) - { - // Sanity check: the ephemeral key of this manifest should not be used - // as the master or ephemeral key of another manifest: - if (auto const x = signingToMasterKeys_.find(m.signingKey); + // If the master key associated with a manifest is or might be + // compromised and is, therefore, no longer trustworthy. + // + // A manifest revocation essentially marks a manifest as compromised. By + // setting the sequence number to the highest value possible, the + // manifest is effectively neutered and cannot be superseded by a forged + // one. + bool const revoked = m.revoked(); + + if (auto stream = j_.warn(); stream && revoked) + logMftAct(stream, "Revoked", m.masterKey, m.sequence); + + // Sanity check: the master key of this manifest should not be used as + // the ephemeral key of another manifest: + if (auto const x = signingToMasterKeys_.find(m.masterKey); x != signingToMasterKeys_.end()) { - JLOG(j_.warn()) - << to_string(m) - << ": Ephemeral key already used as ephemeral key for " - << toBase58(TokenType::NodePublic, x->second); + JLOG(j_.warn()) << to_string(m) + << ": Master key already used as ephemeral key for " + << toBase58(TokenType::NodePublic, x->second); - return ManifestDisposition::badEphemeralKey; + return ManifestDisposition::badMasterKey; } - if (auto const x = map_.find(m.signingKey); x != map_.end()) + if (!revoked) { - JLOG(j_.warn()) - << to_string(m) << ": Ephemeral key used as master key for " - << to_string(x->second); + // Sanity check: the ephemeral key of this manifest should not be + // used as the master or ephemeral key of another 
manifest: + if (auto const x = signingToMasterKeys_.find(m.signingKey); + x != signingToMasterKeys_.end()) + { + JLOG(j_.warn()) + << to_string(m) + << ": Ephemeral key already used as ephemeral key for " + << toBase58(TokenType::NodePublic, x->second); + + return ManifestDisposition::badEphemeralKey; + } + + if (auto const x = map_.find(m.signingKey); x != map_.end()) + { + JLOG(j_.warn()) + << to_string(m) << ": Ephemeral key used as master key for " + << to_string(x->second); - return ManifestDisposition::badEphemeralKey; + return ManifestDisposition::badEphemeralKey; + } } + + return std::nullopt; + }; + + { + std::shared_lock sl{mutex_}; + if (auto d = + prewriteCheck(map_.find(m.masterKey), /*checkSig*/ true, sl)) + return *d; } + std::unique_lock sl{mutex_}; + auto const iter = map_.find(m.masterKey); + // Since we released the previously held read lock, it's possible that the + // collections have been written to. This means we need to run + // `prewriteCheck` again. This re-does work, but `prewriteCheck` is + // relatively inexpensive to run, and doing it this way allows us to run + // `prewriteCheck` under a `shared_lock` above. + // Note, the signature has already been checked above, so it + // doesn't need to happen again (signature checks are somewhat expensive). + // Note: It's a mistake to use an upgradable lock. This is a recipe for + // deadlock. + if (auto d = prewriteCheck(iter, /*checkSig*/ false, sl)) + return *d; + + bool const revoked = m.revoked(); // This is the first manifest we are seeing for a master key. This should // only ever happen once per validator run. 
if (iter == map_.end()) @@ -543,7 +575,7 @@ ManifestCache::save( std::string const& dbTable, std::function const& isTrusted) { - std::lock_guard lock{apply_mutex_}; + std::shared_lock lock{mutex_}; auto db = dbCon.checkoutDb(); saveManifests(*db, dbTable, isTrusted, map_, j_); diff --git a/src/ripple/app/misc/impl/TxQ.cpp b/src/ripple/app/misc/impl/TxQ.cpp index 196060a5bc6..0eeec8d62ae 100644 --- a/src/ripple/app/misc/impl/TxQ.cpp +++ b/src/ripple/app/misc/impl/TxQ.cpp @@ -265,6 +265,8 @@ TxQ::FeeMetrics::escalatedSeriesFeeLevel( return totalFeeLevel; } +LedgerHash TxQ::MaybeTx::parentHashComp{}; + TxQ::MaybeTx::MaybeTx( std::shared_ptr const& txn_, TxID const& txID_, @@ -467,13 +469,12 @@ TxQ::eraseAndAdvance(TxQ::FeeMultiSet::const_iterator_type candidateIter) // Check if the next transaction for this account is earlier in the queue, // which means we skipped it earlier, and need to try it again. - OrderCandidates o; auto const feeNextIter = std::next(candidateIter); bool const useAccountNext = accountNextIter != txQAccount.transactions.end() && accountNextIter->first > candidateIter->seqProxy && (feeNextIter == byFee_.end() || - o(accountNextIter->second, *feeNextIter)); + byFee_.value_comp()(accountNextIter->second, *feeNextIter)); auto const candidateNextIter = byFee_.erase(candidateIter); txQAccount.transactions.erase(accountIter); @@ -1529,6 +1530,37 @@ TxQ::accept(Application& app, OpenView& view) } } + // All transactions that can be moved out of the queue into the open + // ledger have been. Rebuild the queue using the open ledger's + // parent hash, so that transactions paying the same fee are + // reordered. + LedgerHash const& parentHash = view.info().parentHash; +#if !NDEBUG + auto const startingSize = byFee_.size(); + assert(parentHash != parentHash_); + parentHash_ = parentHash; +#endif + // byFee_ doesn't "own" the candidate objects inside it, so it's + // perfectly safe to wipe it and start over, repopulating from + // byAccount_. 
+ // + // In the absence of a "re-sort the list in place" function, this + // was the fastest method tried to repopulate the list. + // Other methods included: create a new list and moving items over one at a + // time, create a new list and merge the old list into it. + byFee_.clear(); + + MaybeTx::parentHashComp = parentHash; + + for (auto& [_, account] : byAccount_) + { + for (auto& [_, candidate] : account.transactions) + { + byFee_.insert(candidate); + } + } + assert(byFee_.size() == startingSize); + return ledgerChanged; } @@ -1740,7 +1772,7 @@ TxQ::getTxRequiredFeeAndSeq( } std::vector -TxQ::getAccountTxs(AccountID const& account, ReadView const& view) const +TxQ::getAccountTxs(AccountID const& account) const { std::vector result; @@ -1761,7 +1793,7 @@ TxQ::getAccountTxs(AccountID const& account, ReadView const& view) const } std::vector -TxQ::getTxs(ReadView const& view) const +TxQ::getTxs() const { std::vector result; diff --git a/src/ripple/app/paths/AccountCurrencies.cpp b/src/ripple/app/paths/AccountCurrencies.cpp index 93eb59551e0..2892ff869c9 100644 --- a/src/ripple/app/paths/AccountCurrencies.cpp +++ b/src/ripple/app/paths/AccountCurrencies.cpp @@ -33,24 +33,16 @@ accountSourceCurrencies( if (includeXRP) currencies.insert(xrpCurrency()); - // List of ripple lines. - auto& rippleLines = lrCache->getRippleLines(account); - - for (auto const& item : rippleLines) + for (auto const& rspEntry : lrCache->getRippleLines(account)) { - auto rspEntry = (RippleState*)item.get(); - assert(rspEntry); - if (!rspEntry) - continue; - - auto& saBalance = rspEntry->getBalance(); + auto& saBalance = rspEntry.getBalance(); // Filter out non if (saBalance > beast::zero // Have IOUs to send. - || (rspEntry->getLimitPeer() + || (rspEntry.getLimitPeer() // Peer extends credit. - && ((-saBalance) < rspEntry->getLimitPeer()))) // Credit left. + && ((-saBalance) < rspEntry.getLimitPeer()))) // Credit left. 
{ currencies.insert(saBalance.getCurrency()); } @@ -72,19 +64,11 @@ accountDestCurrencies( currencies.insert(xrpCurrency()); // Even if account doesn't exist - // List of ripple lines. - auto& rippleLines = lrCache->getRippleLines(account); - - for (auto const& item : rippleLines) + for (auto const& rspEntry : lrCache->getRippleLines(account)) { - auto rspEntry = (RippleState*)item.get(); - assert(rspEntry); - if (!rspEntry) - continue; - - auto& saBalance = rspEntry->getBalance(); + auto& saBalance = rspEntry.getBalance(); - if (saBalance < rspEntry->getLimit()) // Can take more + if (saBalance < rspEntry.getLimit()) // Can take more currencies.insert(saBalance.getCurrency()); } diff --git a/src/ripple/app/paths/PathRequest.cpp b/src/ripple/app/paths/PathRequest.cpp index adb0385283b..e5b15fd9d01 100644 --- a/src/ripple/app/paths/PathRequest.cpp +++ b/src/ripple/app/paths/PathRequest.cpp @@ -441,7 +441,7 @@ PathRequest::parseJson(Json::Value const& jvParams) } Json::Value -PathRequest::doClose(Json::Value const&) +PathRequest::doClose() { JLOG(m_journal.debug()) << iIdentifier << " closed"; std::lock_guard sl(mLock); @@ -457,13 +457,20 @@ PathRequest::doStatus(Json::Value const&) return jvStatus; } +void +PathRequest::doAborting() const +{ + JLOG(m_journal.info()) << iIdentifier << " aborting early"; +} + std::unique_ptr const& PathRequest::getPathFinder( std::shared_ptr const& cache, hash_map>& currency_map, Currency const& currency, STAmount const& dst_amount, - int const level) + int const level, + std::function const& continueCallback) { auto i = currency_map.find(currency); if (i != currency_map.end()) @@ -477,8 +484,8 @@ PathRequest::getPathFinder( dst_amount, saSendMax, app_); - if (pathfinder->findPaths(level)) - pathfinder->computePathRanks(max_paths_); + if (pathfinder->findPaths(level, continueCallback)) + pathfinder->computePathRanks(max_paths_, continueCallback); else pathfinder.reset(); // It's a bad request - clear it. 
return currency_map[currency] = std::move(pathfinder); @@ -488,7 +495,8 @@ bool PathRequest::findPaths( std::shared_ptr const& cache, int const level, - Json::Value& jvArray) + Json::Value& jvArray, + std::function const& continueCallback) { auto sourceCurrencies = sciSourceCurrencies; if (sourceCurrencies.empty() && saSendMax) @@ -515,22 +523,33 @@ PathRequest::findPaths( hash_map> currency_map; for (auto const& issue : sourceCurrencies) { + if (continueCallback && !continueCallback()) + break; JLOG(m_journal.debug()) << iIdentifier << " Trying to find paths: " << STAmount(issue, 1).getFullText(); auto& pathfinder = getPathFinder( - cache, currency_map, issue.currency, dst_amount, level); + cache, + currency_map, + issue.currency, + dst_amount, + level, + continueCallback); if (!pathfinder) { - assert(false); + assert(continueCallback && !continueCallback()); JLOG(m_journal.debug()) << iIdentifier << " No paths found"; continue; } STPath fullLiquidityPath; auto ps = pathfinder->getBestPaths( - max_paths_, fullLiquidityPath, mContext[issue], issue.account); + max_paths_, + fullLiquidityPath, + mContext[issue], + issue.account, + continueCallback); mContext[issue] = ps; auto& sourceAccount = !isXRP(issue.account) @@ -628,7 +647,10 @@ PathRequest::findPaths( } Json::Value -PathRequest::doUpdate(std::shared_ptr const& cache, bool fast) +PathRequest::doUpdate( + std::shared_ptr const& cache, + bool fast, + std::function const& continueCallback) { using namespace std::chrono; JLOG(m_journal.debug()) @@ -699,7 +721,7 @@ PathRequest::doUpdate(std::shared_ptr const& cache, bool fast) JLOG(m_journal.debug()) << iIdentifier << " processing at level " << iLevel; Json::Value jvArray = Json::arrayValue; - if (findPaths(cache, iLevel, jvArray)) + if (findPaths(cache, iLevel, jvArray, continueCallback)) { bLastSuccess = jvArray.size() != 0; newStatus[jss::alternatives] = std::move(jvArray); @@ -730,7 +752,7 @@ PathRequest::doUpdate(std::shared_ptr const& cache, bool fast) } 
InfoSub::pointer -PathRequest::getSubscriber() +PathRequest::getSubscriber() const { return wpSubscriber.lock(); } diff --git a/src/ripple/app/paths/PathRequest.h b/src/ripple/app/paths/PathRequest.h index 704414d031c..70c286d6e1f 100644 --- a/src/ripple/app/paths/PathRequest.h +++ b/src/ripple/app/paths/PathRequest.h @@ -43,10 +43,10 @@ class PathRequests; // Return values from parseJson <0 = invalid, >0 = valid #define PFR_PJ_INVALID -1 #define PFR_PJ_NOCHANGE 0 -#define PFR_PJ_CHANGE 1 -class PathRequest : public std::enable_shared_from_this, - public CountedObject +class PathRequest final : public InfoSubRequest, + public std::enable_shared_from_this, + public CountedObject { public: using wptr = std::weak_ptr; @@ -55,8 +55,6 @@ class PathRequest : public std::enable_shared_from_this, using wref = const wptr&; public: - // VFALCO TODO Break the cyclic dependency on InfoSub - // path_find semantics // Subscriber is updated PathRequest( @@ -91,15 +89,20 @@ class PathRequest : public std::enable_shared_from_this, doCreate(std::shared_ptr const&, Json::Value const&); Json::Value - doClose(Json::Value const&); + doClose() override; Json::Value - doStatus(Json::Value const&); + doStatus(Json::Value const&) override; + void + doAborting() const; // update jvStatus Json::Value - doUpdate(std::shared_ptr const&, bool fast); + doUpdate( + std::shared_ptr const&, + bool fast, + std::function const& continueCallback = {}); InfoSub::pointer - getSubscriber(); + getSubscriber() const; bool hasCompletion(); @@ -113,13 +116,18 @@ class PathRequest : public std::enable_shared_from_this, hash_map>&, Currency const&, STAmount const&, - int const); + int const, + std::function const&); /** Finds and sets a PathSet in the JSON argument. Returns false if the source currencies are inavlid. 
*/ bool - findPaths(std::shared_ptr const&, int const, Json::Value&); + findPaths( + std::shared_ptr const&, + int const, + Json::Value&, + std::function const&); int parseJson(Json::Value const&); @@ -156,7 +164,7 @@ class PathRequest : public std::enable_shared_from_this, int iLevel; bool bLastSuccess; - int iIdentifier; + int const iIdentifier; std::chrono::steady_clock::time_point const created_; std::chrono::steady_clock::time_point quick_reply_; diff --git a/src/ripple/app/paths/PathRequests.cpp b/src/ripple/app/paths/PathRequests.cpp index 50e591eb1b8..951f55dc800 100644 --- a/src/ripple/app/paths/PathRequests.cpp +++ b/src/ripple/app/paths/PathRequests.cpp @@ -40,8 +40,12 @@ PathRequests::getLineCache( { std::lock_guard sl(mLock); - std::uint32_t lineSeq = mLineCache ? mLineCache->getLedger()->seq() : 0; - std::uint32_t lgrSeq = ledger->seq(); + auto lineCache = lineCache_.lock(); + + std::uint32_t const lineSeq = lineCache ? lineCache->getLedger()->seq() : 0; + std::uint32_t const lgrSeq = ledger->seq(); + JLOG(mJournal.debug()) << "getLineCache has cache for " << lineSeq + << ", considering " << lgrSeq; if ((lineSeq == 0) || // no ledger (authoritative && (lgrSeq > lineSeq)) || // newer authoritative ledger @@ -49,9 +53,15 @@ PathRequests::getLineCache( ((lgrSeq + 8) < lineSeq)) || // we jumped way back for some reason (lgrSeq > (lineSeq + 8))) // we jumped way forward for some reason { - mLineCache = std::make_shared(ledger); + JLOG(mJournal.debug()) + << "getLineCache creating new cache for " << lgrSeq; + // Assign to the local before the member, because the member is a + // weak_ptr, and will immediately discard it if there are no other + // references. 
+ lineCache_ = lineCache = std::make_shared( + ledger, app_.journal("RippleLineCache")); } - return mLineCache; + return lineCache; } void @@ -78,8 +88,20 @@ PathRequests::updateAll(std::shared_ptr const& inLedger) int processed = 0, removed = 0; + auto getSubscriber = + [](PathRequest::pointer const& request) -> InfoSub::pointer { + if (auto ipSub = request->getSubscriber(); + ipSub && ipSub->getRequest() == request) + { + return ipSub; + } + request->doAborting(); + return nullptr; + }; + do { + JLOG(mJournal.trace()) << "updateAll looping"; for (auto const& wr : requests) { if (app_.getJobQueue().isStopping()) @@ -87,25 +109,40 @@ PathRequests::updateAll(std::shared_ptr const& inLedger) auto request = wr.lock(); bool remove = true; + JLOG(mJournal.trace()) + << "updateAll request " << (request ? "" : "not ") << "found"; if (request) { + auto continueCallback = [&getSubscriber, &request]() { + // This callback is used by doUpdate to determine whether to + // continue working. If getSubscriber returns null, that + // indicates that this request is no longer relevant. + return (bool)getSubscriber(request); + }; if (!request->needsUpdate( newRequests, cache->getLedger()->seq())) remove = false; else { - if (auto ipSub = request->getSubscriber()) + if (auto ipSub = getSubscriber(request)) { if (!ipSub->getConsumer().warn()) { - Json::Value update = - request->doUpdate(cache, false); + // Release the shared ptr to the subscriber so that + // it can be freed if the client disconnects, and + // thus fail to lock later. 
+ ipSub.reset(); + Json::Value update = request->doUpdate( + cache, false, continueCallback); request->updateComplete(); update[jss::type] = "path_find"; - ipSub->send(update, false); - remove = false; - ++processed; + if ((ipSub = getSubscriber(request))) + { + ipSub->send(update, false); + remove = false; + ++processed; + } } } else if (request->hasCompletion()) @@ -178,6 +215,13 @@ PathRequests::updateAll(std::shared_ptr const& inLedger) << " processed and " << removed << " removed"; } +bool +PathRequests::requestsPending() const +{ + std::lock_guard sl(mLock); + return !requests_.empty(); +} + void PathRequests::insertPathRequest(PathRequest::pointer const& req) { @@ -211,7 +255,7 @@ PathRequests::makePathRequest( if (valid) { - subscriber->setPathRequest(req); + subscriber->setRequest(req); insertPathRequest(req); app_.getLedgerMaster().newPathRequest(); } @@ -258,7 +302,8 @@ PathRequests::doLegacyPathRequest( std::shared_ptr const& inLedger, Json::Value const& request) { - auto cache = std::make_shared(inLedger); + auto cache = std::make_shared( + inLedger, app_.journal("RippleLineCache")); auto req = std::make_shared( app_, [] {}, consumer, ++mLastIdentifier, *this, mJournal); diff --git a/src/ripple/app/paths/PathRequests.h b/src/ripple/app/paths/PathRequests.h index 75b852867ab..db683ee4c13 100644 --- a/src/ripple/app/paths/PathRequests.h +++ b/src/ripple/app/paths/PathRequests.h @@ -51,6 +51,9 @@ class PathRequests void updateAll(std::shared_ptr const& ledger); + bool + requestsPending() const; + std::shared_ptr getLineCache( std::shared_ptr const& ledger, @@ -109,11 +112,11 @@ class PathRequests std::vector requests_; // Use a RippleLineCache - std::shared_ptr mLineCache; + std::weak_ptr lineCache_; std::atomic mLastIdentifier; - std::recursive_mutex mLock; + std::recursive_mutex mutable mLock; }; } // namespace ripple diff --git a/src/ripple/app/paths/Pathfinder.cpp b/src/ripple/app/paths/Pathfinder.cpp index dec564f3ffb..4e81bebd3c3 100644 --- 
a/src/ripple/app/paths/Pathfinder.cpp +++ b/src/ripple/app/paths/Pathfinder.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -191,8 +192,11 @@ Pathfinder::Pathfinder( } bool -Pathfinder::findPaths(int searchLevel) +Pathfinder::findPaths( + int searchLevel, + std::function const& continueCallback) { + JLOG(j_.trace()) << "findPaths start"; if (mDstAmount == beast::zero) { // No need to send zero money. @@ -316,10 +320,13 @@ Pathfinder::findPaths(int searchLevel) // Now iterate over all paths for that paymentType. for (auto const& costedPath : mPathTable[paymentType]) { + if (continueCallback && !continueCallback()) + return false; // Only use paths with at most the current search level. if (costedPath.searchLevel <= searchLevel) { - addPathsForType(costedPath.type); + JLOG(j_.trace()) << "findPaths trying payment type " << paymentType; + addPathsForType(costedPath.type, continueCallback); if (mCompletePaths.size() > PATHFINDER_MAX_COMPLETE_PATHS) break; @@ -401,7 +408,9 @@ Pathfinder::getPathLiquidity( } void -Pathfinder::computePathRanks(int maxPaths) +Pathfinder::computePathRanks( + int maxPaths, + std::function const& continueCallback) { mRemainingAmount = convertAmount(mDstAmount, convert_all_); @@ -439,7 +448,7 @@ Pathfinder::computePathRanks(int maxPaths) JLOG(j_.debug()) << "Default path causes exception"; } - rankPaths(maxPaths, mCompletePaths, mPathRanks); + rankPaths(maxPaths, mCompletePaths, mPathRanks, continueCallback); } static bool @@ -480,8 +489,11 @@ void Pathfinder::rankPaths( int maxPaths, STPathSet const& paths, - std::vector& rankedPaths) + std::vector& rankedPaths, + std::function const& continueCallback) { + JLOG(j_.trace()) << "rankPaths with " << paths.size() << " candidates, and " + << maxPaths << " maximum"; rankedPaths.clear(); rankedPaths.reserve(paths.size()); @@ -499,6 +511,8 @@ Pathfinder::rankPaths( for (int i = 0; i < paths.size(); ++i) { + if (continueCallback && !continueCallback()) + 
return; auto const& currentPath = paths[i]; if (!currentPath.empty()) { @@ -554,7 +568,8 @@ Pathfinder::getBestPaths( int maxPaths, STPath& fullLiquidityPath, STPathSet const& extraPaths, - AccountID const& srcIssuer) + AccountID const& srcIssuer, + std::function const& continueCallback) { JLOG(j_.debug()) << "findPaths: " << mCompletePaths.size() << " paths and " << extraPaths.size() << " extras"; @@ -567,7 +582,7 @@ Pathfinder::getBestPaths( isXRP(mSrcCurrency) || (srcIssuer == mSrcAccount); std::vector extraPathRanks; - rankPaths(maxPaths, extraPaths, extraPathRanks); + rankPaths(maxPaths, extraPaths, extraPathRanks, continueCallback); STPathSet bestPaths; @@ -582,6 +597,8 @@ Pathfinder::getBestPaths( while (pathsIterator != mPathRanks.end() || extraPathsIterator != extraPathRanks.end()) { + if (continueCallback && !continueCallback()) + break; bool usePath = false; bool useExtraPath = false; @@ -692,7 +709,8 @@ Pathfinder::getPathsOut( Currency const& currency, AccountID const& account, bool isDstCurrency, - AccountID const& dstAccount) + AccountID const& dstAccount, + std::function const& continueCallback) { Issue const issue(currency, account); @@ -717,30 +735,27 @@ Pathfinder::getPathsOut( { count = app_.getOrderBookDB().getBookSize(issue); - for (auto const& item : mRLCache->getRippleLines(account)) + for (auto const& rspEntry : mRLCache->getRippleLines(account)) { - RippleState* rspEntry = (RippleState*)item.get(); - - if (currency != rspEntry->getLimit().getCurrency()) + if (currency != rspEntry.getLimit().getCurrency()) { } else if ( - rspEntry->getBalance() <= beast::zero && - (!rspEntry->getLimitPeer() || - -rspEntry->getBalance() >= rspEntry->getLimitPeer() || - (bAuthRequired && !rspEntry->getAuth()))) + rspEntry.getBalance() <= beast::zero && + (!rspEntry.getLimitPeer() || + -rspEntry.getBalance() >= rspEntry.getLimitPeer() || + (bAuthRequired && !rspEntry.getAuth()))) { } - else if ( - isDstCurrency && dstAccount == rspEntry->getAccountIDPeer()) + 
else if (isDstCurrency && dstAccount == rspEntry.getAccountIDPeer()) { count += 10000; // count a path to the destination extra } - else if (rspEntry->getNoRipplePeer()) + else if (rspEntry.getNoRipplePeer()) { // This probably isn't a useful path out } - else if (rspEntry->getFreezePeer()) + else if (rspEntry.getFreezePeer()) { // Not a useful path out } @@ -758,17 +773,26 @@ void Pathfinder::addLinks( STPathSet const& currentPaths, // The paths to build from STPathSet& incompletePaths, // The set of partial paths we add to - int addFlags) + int addFlags, + std::function const& continueCallback) { JLOG(j_.debug()) << "addLink< on " << currentPaths.size() << " source(s), flags=" << addFlags; for (auto const& path : currentPaths) - addLink(path, incompletePaths, addFlags); + { + if (continueCallback && !continueCallback()) + return; + addLink(path, incompletePaths, addFlags, continueCallback); + } } STPathSet& -Pathfinder::addPathsForType(PathType const& pathType) +Pathfinder::addPathsForType( + PathType const& pathType, + std::function const& continueCallback) { + JLOG(j_.warn()) << "addPathsForType " + << CollectionAndDelimiter(pathType, ", "); // See if the set of paths for this type already exists. auto it = mPaths.find(pathType); if (it != mPaths.end()) @@ -777,13 +801,16 @@ Pathfinder::addPathsForType(PathType const& pathType) // Otherwise, if the type has no nodes, return the empty path. if (pathType.empty()) return mPaths[pathType]; + if (continueCallback && !continueCallback()) + return mPaths[{}]; // Otherwise, get the paths for the parent PathType by calling // addPathsForType recursively. 
PathType parentPathType = pathType; parentPathType.pop_back(); - STPathSet const& parentPaths = addPathsForType(parentPathType); + STPathSet const& parentPaths = + addPathsForType(parentPathType, continueCallback); STPathSet& pathsOut = mPaths[pathType]; JLOG(j_.debug()) << "getPaths< adding onto '" @@ -803,26 +830,38 @@ Pathfinder::addPathsForType(PathType const& pathType) break; case nt_ACCOUNTS: - addLinks(parentPaths, pathsOut, afADD_ACCOUNTS); + addLinks(parentPaths, pathsOut, afADD_ACCOUNTS, continueCallback); break; case nt_BOOKS: - addLinks(parentPaths, pathsOut, afADD_BOOKS); + addLinks(parentPaths, pathsOut, afADD_BOOKS, continueCallback); break; case nt_XRP_BOOK: - addLinks(parentPaths, pathsOut, afADD_BOOKS | afOB_XRP); + addLinks( + parentPaths, + pathsOut, + afADD_BOOKS | afOB_XRP, + continueCallback); break; case nt_DEST_BOOK: - addLinks(parentPaths, pathsOut, afADD_BOOKS | afOB_LAST); + addLinks( + parentPaths, + pathsOut, + afADD_BOOKS | afOB_LAST, + continueCallback); break; case nt_DESTINATION: // FIXME: What if a different issuer was specified on the // destination amount? // TODO(tom): what does this even mean? Should it be a JIRA? - addLinks(parentPaths, pathsOut, afADD_ACCOUNTS | afAC_LAST); + addLinks( + parentPaths, + pathsOut, + afADD_ACCOUNTS | afAC_LAST, + continueCallback); break; } @@ -893,7 +932,8 @@ void Pathfinder::addLink( const STPath& currentPath, // The path to build from STPathSet& incompletePaths, // The set of partial paths we add to - int addFlags) + int addFlags, + std::function const& continueCallback) { auto const& pathEnd = currentPath.empty() ? mSource : currentPath.back(); auto const& uEndCurrency = pathEnd.getCurrency(); @@ -906,7 +946,8 @@ Pathfinder::addLink( // rather than the ultimate destination? 
bool const hasEffectiveDestination = mEffectiveDst != mDstAccount; - JLOG(j_.trace()) << "addLink< flags=" << addFlags << " onXRP=" << bOnXRP; + JLOG(j_.trace()) << "addLink< flags=" << addFlags << " onXRP=" << bOnXRP + << " completePaths size=" << mCompletePaths.size(); JLOG(j_.trace()) << currentPath.getJson(JsonOptions::none); if (addFlags & afADD_ACCOUNTS) @@ -940,15 +981,11 @@ Pathfinder::addLink( AccountCandidates candidates; candidates.reserve(rippleLines.size()); - for (auto const& item : rippleLines) + for (auto const& rs : rippleLines) { - auto* rs = dynamic_cast(item.get()); - if (!rs) - { - JLOG(j_.error()) << "Couldn't decipher RippleState"; - continue; - } - auto const& acct = rs->getAccountIDPeer(); + if (continueCallback && !continueCallback()) + return; + auto const& acct = rs.getAccountIDPeer(); if (hasEffectiveDestination && (acct == mDstAccount)) { @@ -963,18 +1000,18 @@ Pathfinder::addLink( continue; } - if ((uEndCurrency == rs->getLimit().getCurrency()) && + if ((uEndCurrency == rs.getLimit().getCurrency()) && !currentPath.hasSeen(acct, uEndCurrency, acct)) { // path is for correct currency and has not been seen - if (rs->getBalance() <= beast::zero && - (!rs->getLimitPeer() || - -rs->getBalance() >= rs->getLimitPeer() || - (bRequireAuth && !rs->getAuth()))) + if (rs.getBalance() <= beast::zero && + (!rs.getLimitPeer() || + -rs.getBalance() >= rs.getLimitPeer() || + (bRequireAuth && !rs.getAuth()))) { // path has no credit } - else if (bIsNoRippleOut && rs->getNoRipple()) + else if (bIsNoRippleOut && rs.getNoRipple()) { // Can't leave on this path } @@ -1011,7 +1048,8 @@ Pathfinder::addLink( uEndCurrency, acct, bIsEndCurrency, - mEffectiveDst); + mEffectiveDst, + continueCallback); if (out) candidates.push_back({out, acct}); } @@ -1039,6 +1077,8 @@ Pathfinder::addLink( auto it = candidates.begin(); while (count-- != 0) { + if (continueCallback && !continueCallback()) + return; // Add accounts to incompletePaths STPathElement pathElement( 
STPathElement::typeAccount, @@ -1083,17 +1123,17 @@ Pathfinder::addLink( for (auto const& book : books) { + if (continueCallback && !continueCallback()) + return; if (!currentPath.hasSeen( - xrpAccount(), - book->getCurrencyOut(), - book->getIssuerOut()) && - !issueMatchesOrigin(book->book().out) && + xrpAccount(), book.out.currency, book.out.account) && + !issueMatchesOrigin(book.out) && (!bDestOnly || - (book->getCurrencyOut() == mDstAmount.getCurrency()))) + (book.out.currency == mDstAmount.getCurrency()))) { STPath newPath(currentPath); - if (book->getCurrencyOut().isZero()) + if (book.out.currency.isZero()) { // to XRP // add the order book itself @@ -1116,9 +1156,9 @@ Pathfinder::addLink( incompletePaths.push_back(newPath); } else if (!currentPath.hasSeen( - book->getIssuerOut(), - book->getCurrencyOut(), - book->getIssuerOut())) + book.out.account, + book.out.currency, + book.out.account)) { // Don't want the book if we've already seen the issuer // book -> account -> book @@ -1131,8 +1171,8 @@ Pathfinder::addLink( STPathElement::typeCurrency | STPathElement::typeIssuer, xrpAccount(), - book->getCurrencyOut(), - book->getIssuerOut()); + book.out.currency, + book.out.account); } else { @@ -1141,19 +1181,19 @@ Pathfinder::addLink( STPathElement::typeCurrency | STPathElement::typeIssuer, xrpAccount(), - book->getCurrencyOut(), - book->getIssuerOut()); + book.out.currency, + book.out.account); } if (hasEffectiveDestination && - book->getIssuerOut() == mDstAccount && - book->getCurrencyOut() == mDstAmount.getCurrency()) + book.out.account == mDstAccount && + book.out.currency == mDstAmount.getCurrency()) { // We skipped a required issuer } else if ( - book->getIssuerOut() == mEffectiveDst && - book->getCurrencyOut() == mDstAmount.getCurrency()) + book.out.account == mEffectiveDst && + book.out.currency == mDstAmount.getCurrency()) { // with the destination account, this path is // complete JLOG(j_.trace()) @@ -1168,9 +1208,9 @@ Pathfinder::addLink( newPath, 
STPathElement( STPathElement::typeAccount, - book->getIssuerOut(), - book->getCurrencyOut(), - book->getIssuerOut())); + book.out.account, + book.out.currency, + book.out.account)); } } } diff --git a/src/ripple/app/paths/Pathfinder.h b/src/ripple/app/paths/Pathfinder.h index aa40b143277..45da9ec1126 100644 --- a/src/ripple/app/paths/Pathfinder.h +++ b/src/ripple/app/paths/Pathfinder.h @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -34,7 +35,7 @@ namespace ripple { @see RippleCalc */ -class Pathfinder +class Pathfinder : public CountedObject { public: /** Construct a pathfinder without an issuer.*/ @@ -56,11 +57,15 @@ class Pathfinder initPathTable(); bool - findPaths(int searchLevel); + findPaths( + int searchLevel, + std::function const& continueCallback = {}); /** Compute the rankings of the paths. */ void - computePathRanks(int maxPaths); + computePathRanks( + int maxPaths, + std::function const& continueCallback = {}); /* Get the best paths, up to maxPaths in number, from mCompletePaths. @@ -72,7 +77,8 @@ class Pathfinder int maxPaths, STPath& fullLiquidityPath, STPathSet const& extraPaths, - AccountID const& srcIssuer); + AccountID const& srcIssuer, + std::function const& continueCallback = {}); enum NodeType { nt_SOURCE, // The source account: with an issuer account, if needed. @@ -127,7 +133,9 @@ class Pathfinder // Add all paths of one type to mCompletePaths. STPathSet& - addPathsForType(PathType const& type); + addPathsForType( + PathType const& type, + std::function const& continueCallback); bool issueMatchesOrigin(Issue const&); @@ -137,20 +145,23 @@ class Pathfinder Currency const& currency, AccountID const& account, bool isDestCurrency, - AccountID const& dest); + AccountID const& dest, + std::function const& continueCallback); void addLink( STPath const& currentPath, STPathSet& incompletePaths, - int addFlags); + int addFlags, + std::function const& continueCallback); // Call addLink() for each path in currentPaths. 
void addLinks( STPathSet const& currentPaths, STPathSet& incompletePaths, - int addFlags); + int addFlags, + std::function const& continueCallback); // Compute the liquidity for a path. Return tesSUCCESS if it has has enough // liquidity to be worth keeping, otherwise an error. @@ -178,7 +189,8 @@ class Pathfinder rankPaths( int maxPaths, STPathSet const& paths, - std::vector& rankedPaths); + std::vector& rankedPaths, + std::function const& continueCallback); AccountID mSrcAccount; AccountID mDstAccount; diff --git a/src/ripple/app/paths/RippleLineCache.cpp b/src/ripple/app/paths/RippleLineCache.cpp index 6bb710f4dae..a0b26ba2841 100644 --- a/src/ripple/app/paths/RippleLineCache.cpp +++ b/src/ripple/app/paths/RippleLineCache.cpp @@ -18,30 +18,47 @@ //============================================================================== #include +#include #include namespace ripple { -RippleLineCache::RippleLineCache(std::shared_ptr const& ledger) +RippleLineCache::RippleLineCache( + std::shared_ptr const& ledger, + beast::Journal j) + : journal_(j) { - // We want the caching that OpenView provides - // And we need to own a shared_ptr to the input view - // VFALCO TODO This should be a CachedLedger - mLedger = std::make_shared(&*ledger, ledger); + mLedger = ledger; + + JLOG(journal_.debug()) << "RippleLineCache created for ledger " + << mLedger->info().seq; } -std::vector const& +RippleLineCache::~RippleLineCache() +{ + JLOG(journal_.debug()) << "~RippleLineCache destroyed for ledger " + << mLedger->info().seq << " with " << lines_.size() + << " accounts"; +} + +std::vector const& RippleLineCache::getRippleLines(AccountID const& accountID) { AccountKey key(accountID, hasher_(accountID)); std::lock_guard sl(mLock); - auto [it, inserted] = - lines_.emplace(key, std::vector()); + auto [it, inserted] = lines_.emplace(key, std::vector()); if (inserted) - it->second = getRippleStateItems(accountID, *mLedger); + it->second = PathFindTrustLine::getItems(accountID, *mLedger); + + 
JLOG(journal_.debug()) << "RippleLineCache getRippleLines for ledger " + << mLedger->info().seq << " found " + << it->second.size() << " lines for " + << (inserted ? "new " : "existing ") << accountID + << " out of a total of " << lines_.size() + << " accounts"; return it->second; } diff --git a/src/ripple/app/paths/RippleLineCache.h b/src/ripple/app/paths/RippleLineCache.h index b73e7afade9..e7a7e0f74a3 100644 --- a/src/ripple/app/paths/RippleLineCache.h +++ b/src/ripple/app/paths/RippleLineCache.h @@ -21,8 +21,10 @@ #define RIPPLE_APP_PATHS_RIPPLELINECACHE_H_INCLUDED #include -#include +#include +#include #include + #include #include #include @@ -31,10 +33,13 @@ namespace ripple { // Used by Pathfinder -class RippleLineCache +class RippleLineCache final : public CountedObject { public: - explicit RippleLineCache(std::shared_ptr const& l); + explicit RippleLineCache( + std::shared_ptr const& l, + beast::Journal j); + ~RippleLineCache(); std::shared_ptr const& getLedger() const @@ -42,7 +47,7 @@ class RippleLineCache return mLedger; } - std::vector const& + std::vector const& getRippleLines(AccountID const& accountID); private: @@ -51,7 +56,9 @@ class RippleLineCache ripple::hardened_hash<> hasher_; std::shared_ptr mLedger; - struct AccountKey + beast::Journal journal_; + + struct AccountKey final : public CountedObject { AccountID account_; std::size_t hash_value_; @@ -90,7 +97,7 @@ class RippleLineCache }; }; - hash_map, AccountKey::Hash> + hash_map, AccountKey::Hash> lines_; }; diff --git a/src/ripple/app/paths/RippleState.cpp b/src/ripple/app/paths/RippleState.cpp deleted file mode 100644 index 5813f47ea54..00000000000 --- a/src/ripple/app/paths/RippleState.cpp +++ /dev/null @@ -1,85 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include - -namespace ripple { - -RippleState::pointer -RippleState::makeItem( - AccountID const& accountID, - std::shared_ptr sle) -{ - // VFALCO Does this ever happen in practice? 
- if (!sle || sle->getType() != ltRIPPLE_STATE) - return {}; - return std::make_shared(std::move(sle), accountID); -} - -RippleState::RippleState( - std::shared_ptr&& sle, - AccountID const& viewAccount) - : sle_(std::move(sle)) - , mFlags(sle_->getFieldU32(sfFlags)) - , mLowLimit(sle_->getFieldAmount(sfLowLimit)) - , mHighLimit(sle_->getFieldAmount(sfHighLimit)) - , mLowID(mLowLimit.getIssuer()) - , mHighID(mHighLimit.getIssuer()) - , lowQualityIn_(sle_->getFieldU32(sfLowQualityIn)) - , lowQualityOut_(sle_->getFieldU32(sfLowQualityOut)) - , highQualityIn_(sle_->getFieldU32(sfHighQualityIn)) - , highQualityOut_(sle_->getFieldU32(sfHighQualityOut)) - , mBalance(sle_->getFieldAmount(sfBalance)) -{ - mViewLowest = (mLowID == viewAccount); - - if (!mViewLowest) - mBalance.negate(); -} - -Json::Value -RippleState::getJson(int) -{ - Json::Value ret(Json::objectValue); - ret["low_id"] = to_string(mLowID); - ret["high_id"] = to_string(mHighID); - return ret; -} - -std::vector -getRippleStateItems(AccountID const& accountID, ReadView const& view) -{ - std::vector items; - forEachItem( - view, - accountID, - [&items, &accountID](std::shared_ptr const& sleCur) { - auto ret = RippleState::makeItem(accountID, sleCur); - if (ret) - items.push_back(ret); - }); - - return items; -} - -} // namespace ripple diff --git a/src/ripple/app/paths/TrustLine.cpp b/src/ripple/app/paths/TrustLine.cpp new file mode 100644 index 00000000000..12020acf714 --- /dev/null +++ b/src/ripple/app/paths/TrustLine.cpp @@ -0,0 +1,113 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace ripple { + +TrustLineBase::TrustLineBase( + std::shared_ptr const& sle, + AccountID const& viewAccount) + : key_(sle->key()) + , mLowLimit(sle->getFieldAmount(sfLowLimit)) + , mHighLimit(sle->getFieldAmount(sfHighLimit)) + , mBalance(sle->getFieldAmount(sfBalance)) + , mFlags(sle->getFieldU32(sfFlags)) + , mViewLowest(mLowLimit.getIssuer() == viewAccount) +{ + if (!mViewLowest) + mBalance.negate(); +} + +Json::Value +TrustLineBase::getJson(int) +{ + Json::Value ret(Json::objectValue); + ret["low_id"] = to_string(mLowLimit.getIssuer()); + ret["high_id"] = to_string(mHighLimit.getIssuer()); + return ret; +} + +std::optional +PathFindTrustLine::makeItem( + AccountID const& accountID, + std::shared_ptr const& sle) +{ + if (!sle || sle->getType() != ltRIPPLE_STATE) + return {}; + return std::optional{PathFindTrustLine{sle, accountID}}; +} + +namespace detail { +template +std::vector +getTrustLineItems(AccountID const& accountID, ReadView const& view) +{ + std::vector items; + forEachItem( + view, + accountID, + [&items, &accountID](std::shared_ptr const& sleCur) { + auto ret = T::makeItem(accountID, sleCur); + if (ret) + items.push_back(std::move(*ret)); + }); + + return items; +} +} // namespace detail + +std::vector +PathFindTrustLine::getItems(AccountID const& accountID, ReadView const& view) +{ + return 
detail::getTrustLineItems(accountID, view); +} + +RPCTrustLine::RPCTrustLine( + std::shared_ptr const& sle, + AccountID const& viewAccount) + : TrustLineBase(sle, viewAccount) + , lowQualityIn_(sle->getFieldU32(sfLowQualityIn)) + , lowQualityOut_(sle->getFieldU32(sfLowQualityOut)) + , highQualityIn_(sle->getFieldU32(sfHighQualityIn)) + , highQualityOut_(sle->getFieldU32(sfHighQualityOut)) +{ +} + +std::optional +RPCTrustLine::makeItem( + AccountID const& accountID, + std::shared_ptr const& sle) +{ + if (!sle || sle->getType() != ltRIPPLE_STATE) + return {}; + return std::optional{RPCTrustLine{sle, accountID}}; +} + +std::vector +RPCTrustLine::getItems(AccountID const& accountID, ReadView const& view) +{ + return detail::getTrustLineItems(accountID, view); +} + +} // namespace ripple diff --git a/src/ripple/app/paths/RippleState.h b/src/ripple/app/paths/TrustLine.h similarity index 64% rename from src/ripple/app/paths/RippleState.h rename to src/ripple/app/paths/TrustLine.h index ccefb2194a1..0217f0e750a 100644 --- a/src/ripple/app/paths/RippleState.h +++ b/src/ripple/app/paths/TrustLine.h @@ -20,12 +20,14 @@ #ifndef RIPPLE_APP_PATHS_RIPPLESTATE_H_INCLUDED #define RIPPLE_APP_PATHS_RIPPLESTATE_H_INCLUDED +#include #include #include #include #include + #include -#include // +#include namespace ripple { @@ -34,30 +36,32 @@ namespace ripple { "low" account and a "high" account. This wraps the SLE and expresses its data from the perspective of a chosen account on the line. + + This wrapper is primarily used in the path finder and there can easily be + tens of millions of instances of this class. When modifying this class think + carefully about the memory implications. */ -// VFALCO TODO Rename to TrustLine -class RippleState +class TrustLineBase { -public: - // VFALCO Why is this shared_ptr? - using pointer = std::shared_ptr; +protected: + // This class should not be instantiated directly. Use one of the derived + // classes. 
+ TrustLineBase( + std::shared_ptr const& sle, + AccountID const& viewAccount); + + ~TrustLineBase() = default; + TrustLineBase(TrustLineBase const&) = default; + TrustLineBase& + operator=(TrustLineBase const&) = delete; + TrustLineBase(TrustLineBase&&) = default; public: - RippleState() = delete; - - virtual ~RippleState() = default; - - static RippleState::pointer - makeItem(AccountID const& accountID, std::shared_ptr sle); - - // Must be public, for make_shared - RippleState(std::shared_ptr&& sle, AccountID const& viewAccount); - /** Returns the state map key for the ledger entry. */ - uint256 + uint256 const& key() const { - return sle_->key(); + return key_; } // VFALCO Take off the "get" from each function name @@ -65,13 +69,13 @@ class RippleState AccountID const& getAccountID() const { - return mViewLowest ? mLowID : mHighID; + return mViewLowest ? mLowLimit.getIssuer() : mHighLimit.getIssuer(); } AccountID const& getAccountIDPeer() const { - return !mViewLowest ? mLowID : mHighID; + return !mViewLowest ? mLowLimit.getIssuer() : mHighLimit.getIssuer(); } // True, Provided auth to peer. @@ -137,6 +141,52 @@ class RippleState return !mViewLowest ? mLowLimit : mHighLimit; } + Json::Value + getJson(int); + +protected: + uint256 key_; + + STAmount const mLowLimit; + STAmount const mHighLimit; + + STAmount mBalance; + + std::uint32_t mFlags; + + bool mViewLowest; +}; + +// This wrapper is used for the path finder +class PathFindTrustLine final : public TrustLineBase, + public CountedObject +{ + using TrustLineBase::TrustLineBase; + +public: + PathFindTrustLine() = delete; + + static std::optional + makeItem(AccountID const& accountID, std::shared_ptr const& sle); + + static std::vector + getItems(AccountID const& accountID, ReadView const& view); +}; + +// This wrapper is used for the `AccountLines` command and includes the quality +// in and quality out values. 
+class RPCTrustLine final : public TrustLineBase, + public CountedObject +{ + using TrustLineBase::TrustLineBase; + +public: + RPCTrustLine() = delete; + + RPCTrustLine( + std::shared_ptr const& sle, + AccountID const& viewAccount); + Rate const& getQualityIn() const { @@ -149,33 +199,19 @@ class RippleState return mViewLowest ? lowQualityOut_ : highQualityOut_; } - Json::Value - getJson(int); - -private: - std::shared_ptr sle_; - - bool mViewLowest; - - std::uint32_t mFlags; + static std::optional + makeItem(AccountID const& accountID, std::shared_ptr const& sle); - STAmount const& mLowLimit; - STAmount const& mHighLimit; - - AccountID const& mLowID; - AccountID const& mHighID; + static std::vector + getItems(AccountID const& accountID, ReadView const& view); +private: Rate lowQualityIn_; Rate lowQualityOut_; Rate highQualityIn_; Rate highQualityOut_; - - STAmount mBalance; }; -std::vector -getRippleStateItems(AccountID const& accountID, ReadView const& view); - } // namespace ripple #endif diff --git a/src/ripple/app/rdb/RelationalDBInterface.h b/src/ripple/app/rdb/RelationalDBInterface.h index a0d01545bc5..759261832db 100644 --- a/src/ripple/app/rdb/RelationalDBInterface.h +++ b/src/ripple/app/rdb/RelationalDBInterface.h @@ -125,7 +125,7 @@ class RelationalDBInterface TxMeta const& meta, uint256 const& nodestoreHash, beast::Journal j) - : accounts(meta.getAffectedAccounts(j)) + : accounts(meta.getAffectedAccounts()) , ledgerSequence(meta.getLgrSeq()) , transactionIndex(meta.getIndex()) , txHash(meta.getTxID()) diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp index 748265e624b..c067bfe0cd0 100644 --- a/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp +++ b/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp @@ -222,7 +222,7 @@ saveValidatedLedger( hotLEDGER, std::move(s.modData()), ledger->info().hash, seq); } - AcceptedLedger::pointer aLedger; + std::shared_ptr aLedger; try 
{ aLedger = app.getAcceptedLedgerCache().fetch(ledger->info().hash); @@ -269,9 +269,8 @@ saveValidatedLedger( std::string const ledgerSeq(std::to_string(seq)); - for (auto const& [_, acceptedLedgerTx] : aLedger->getMap()) + for (auto const& acceptedLedgerTx : *aLedger) { - (void)_; uint256 transactionID = acceptedLedgerTx->getTransactionID(); std::string const txnId(to_string(transactionID)); @@ -317,7 +316,7 @@ saveValidatedLedger( JLOG(j.trace()) << "ActTx: " << sql; *db << sql; } - else if (auto const sleTxn = acceptedLedgerTx->getTxn(); + else if (auto const& sleTxn = acceptedLedgerTx->getTxn(); !isPseudoTx(*sleTxn)) { // It's okay for pseudo transactions to not affect any diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp index acda4ea1d4a..32dcfc25188 100644 --- a/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp +++ b/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp @@ -79,7 +79,8 @@ saveLedgerMeta( if (app.config().useTxTables()) { - AcceptedLedger::pointer const aLedger = [&app, ledger] { + auto const aLedger = [&app, + ledger]() -> std::shared_ptr { try { auto aLedger = @@ -99,7 +100,7 @@ saveLedgerMeta( << "An accepted ledger was missing nodes"; } - return AcceptedLedger::pointer{nullptr}; + return {}; }(); if (!aLedger) @@ -107,10 +108,8 @@ saveLedgerMeta( soci::transaction tr(txnMetaSession); - for (auto const& [_, acceptedLedgerTx] : aLedger->getMap()) + for (auto const& acceptedLedgerTx : *aLedger) { - (void)_; - std::string_view constexpr txnSQL = R"sql(INSERT OR REPLACE INTO TransactionMeta VALUES (:transactionID,:shardIndex);)sql"; @@ -247,7 +246,7 @@ updateLedgerDBs( "WHERE TransID = :txID;", soci::use(sTxID); - auto const& accounts = txMeta->getAffectedAccounts(j); + auto const& accounts = txMeta->getAffectedAccounts(); if (!accounts.empty()) { auto const sTxnSeq{std::to_string(txMeta->getIndex())}; diff --git a/src/ripple/basics/TaggedCache.h 
b/src/ripple/basics/TaggedCache.h index 45f069bd97b..548d21dc78e 100644 --- a/src/ripple/basics/TaggedCache.h +++ b/src/ripple/basics/TaggedCache.h @@ -300,19 +300,16 @@ class TaggedCache @param key The key corresponding to the object @param data A shared pointer to the data corresponding to the object. - @param replace `true` if `data` is the up to date version of the object. + @param replace Function that decides if cache should be replaced @return `true` If the key already existed. */ -private: - template +public: bool canonicalize( const key_type& key, - std::conditional_t< - replace, - std::shared_ptr const, - std::shared_ptr>& data) + std::shared_ptr& data, + std::function const&)>&& replace) { // Return canonical value, store if needed, refresh in cache // Return values: true=we had the data already @@ -335,7 +332,7 @@ class TaggedCache if (entry.isCached()) { - if constexpr (replace) + if (replace(entry.ptr)) { entry.ptr = data; entry.weak_ptr = data; @@ -352,7 +349,7 @@ class TaggedCache if (cachedData) { - if constexpr (replace) + if (replace(entry.ptr)) { entry.ptr = data; entry.weak_ptr = data; @@ -374,19 +371,22 @@ class TaggedCache return false; } -public: bool canonicalize_replace_cache( const key_type& key, std::shared_ptr const& data) { - return canonicalize(key, data); + return canonicalize( + key, + const_cast&>(data), + [](std::shared_ptr const&) { return true; }); } bool canonicalize_replace_client(const key_type& key, std::shared_ptr& data) { - return canonicalize(key, data); + return canonicalize( + key, data, [](std::shared_ptr const&) { return false; }); } std::shared_ptr diff --git a/src/ripple/basics/join.h b/src/ripple/basics/join.h new file mode 100644 index 00000000000..dde52bc9e69 --- /dev/null +++ b/src/ripple/basics/join.h @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* +This file is part of rippled: https://github.com/ripple/rippled +Copyright (c) 2022 Ripple Labs Inc. 
+ +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#ifndef JOIN_H_INCLUDED +#define JOIN_H_INCLUDED + +#include + +namespace ripple { + +template +Stream& +join(Stream& s, Iter iter, Iter end, std::string const& delimiter) +{ + if (iter == end) + return s; + s << *iter; + for (++iter; iter != end; ++iter) + s << delimiter << *iter; + return s; +} + +template +class CollectionAndDelimiter +{ +public: + Collection const& collection; + std::string const delimiter; + + explicit CollectionAndDelimiter(Collection const& c, std::string delim) + : collection(c), delimiter(std::move(delim)) + { + } + + template + friend Stream& + operator<<(Stream& s, CollectionAndDelimiter const& cd) + { + return join( + s, + std::begin(cd.collection), + std::end(cd.collection), + cd.delimiter); + } +}; + +template +class CollectionAndDelimiter +{ +public: + Collection const* collection; + std::string const delimiter; + + explicit CollectionAndDelimiter(Collection const c[N], std::string delim) + : collection(c), delimiter(std::move(delim)) + { + } + + template + friend Stream& + operator<<(Stream& s, CollectionAndDelimiter const& cd) + { + return join(s, cd.collection, cd.collection + N, cd.delimiter); + } +}; + +// Specialization for const char* strings +template +class CollectionAndDelimiter +{ +public: + char const* collection; + 
std::string const delimiter; + + explicit CollectionAndDelimiter(char const c[N], std::string delim) + : collection(c), delimiter(std::move(delim)) + { + } + + template + friend Stream& + operator<<(Stream& s, CollectionAndDelimiter const& cd) + { + auto end = cd.collection + N; + if (N > 0 && *(end - 1) == '\0') + --end; + return join(s, cd.collection, end, cd.delimiter); + } +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/core/Job.h b/src/ripple/core/Job.h index 0f3bb718bb6..c4f2eddf35a 100644 --- a/src/ripple/core/Job.h +++ b/src/ripple/core/Job.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_CORE_JOB_H_INCLUDED #define RIPPLE_CORE_JOB_H_INCLUDED +#include #include #include #include @@ -52,17 +53,18 @@ enum JobType { jtRPC, // A websocket command from the client jtSWEEP, // Sweep for stale structures jtVALIDATION_ut, // A validation from an untrusted source + jtMANIFEST, // A validator's manifest jtUPDATE_PF, // Update pathfinding requests jtTRANSACTION_l, // A local transaction jtREPLAY_REQ, // Peer request a ledger delta or a skip list jtLEDGER_REQ, // Peer request ledger/txnset data jtPROPOSAL_ut, // A proposal from an untrusted source jtREPLAY_TASK, // A Ledger replay task/subtask - jtLEDGER_DATA, // Received data for a ledger we're acquiring jtTRANSACTION, // A transaction received from the network jtMISSING_TXN, // Request missing transactions jtREQUESTED_TXN, // Reply with requested transactions jtBATCH, // Apply batched transactions + jtLEDGER_DATA, // Received data for a ledger we're acquiring jtADVANCE, // Advance validated/acquired ledgers jtPUBLEDGER, // Publish a fully-accepted ledger jtTXN_DATA, // Fetch a proposed set @@ -91,7 +93,7 @@ enum JobType { jtNS_WRITE, }; -class Job +class Job : public CountedObject { public: using clock_type = std::chrono::steady_clock; diff --git a/src/ripple/core/JobTypes.h b/src/ripple/core/JobTypes.h index 75ec5ec0b4e..2803537f115 100644 --- a/src/ripple/core/JobTypes.h +++ b/src/ripple/core/JobTypes.h @@ -72,6 
+72,7 @@ class JobTypes add(jtPACK, "makeFetchPack", 1, 0ms, 0ms); add(jtPUBOLDLEDGER, "publishAcqLedger", 2, 10000ms, 15000ms); add(jtVALIDATION_ut, "untrustedValidation", maxLimit, 2000ms, 5000ms); + add(jtMANIFEST, "manifest", maxLimit, 2000ms, 5000ms); add(jtTRANSACTION_l, "localTransaction", maxLimit, 100ms, 500ms); add(jtREPLAY_REQ, "ledgerReplayRequest", 10, 250ms, 1000ms); add(jtLEDGER_REQ, "ledgerRequest", 3, 0ms, 0ms); diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index 50b487f8be8..e53d9688392 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -115,19 +115,19 @@ sizedItems // what they control and whether there exists an explicit // config option that can be used to override the default. - // tiny small medium large huge - {SizedItem::sweepInterval, {{ 10, 30, 60, 90, 120 }}}, - {SizedItem::treeCacheSize, {{ 128000, 256000, 512000, 768000, 2048000 }}}, - {SizedItem::treeCacheAge, {{ 30, 60, 90, 120, 900 }}}, - {SizedItem::ledgerSize, {{ 32, 128, 256, 384, 768 }}}, - {SizedItem::ledgerAge, {{ 30, 90, 180, 240, 900 }}}, - {SizedItem::ledgerFetch, {{ 2, 3, 4, 5, 8 }}}, - {SizedItem::hashNodeDBCache, {{ 4, 12, 24, 64, 128 }}}, - {SizedItem::txnDBCache, {{ 4, 12, 24, 64, 128 }}}, - {SizedItem::lgrDBCache, {{ 4, 8, 16, 32, 128 }}}, - {SizedItem::openFinalLimit, {{ 8, 16, 32, 64, 128 }}}, - {SizedItem::burstSize, {{ 4, 8, 16, 32, 48 }}}, - {SizedItem::ramSizeGB, {{ 8, 12, 16, 24, 32 }}}, + // tiny small medium large huge + {SizedItem::sweepInterval, {{ 10, 30, 60, 90, 120 }}}, + {SizedItem::treeCacheSize, {{ 262144, 524288, 2097152, 4194304, 8388608 }}}, + {SizedItem::treeCacheAge, {{ 30, 60, 90, 120, 900 }}}, + {SizedItem::ledgerSize, {{ 32, 32, 64, 256, 384 }}}, + {SizedItem::ledgerAge, {{ 30, 60, 180, 300, 600 }}}, + {SizedItem::ledgerFetch, {{ 2, 3, 4, 5, 8 }}}, + {SizedItem::hashNodeDBCache, {{ 4, 12, 24, 64, 128 }}}, + {SizedItem::txnDBCache, {{ 4, 12, 24, 64, 128 }}}, + 
{SizedItem::lgrDBCache, {{ 4, 8, 16, 32, 128 }}}, + {SizedItem::openFinalLimit, {{ 8, 16, 32, 64, 128 }}}, + {SizedItem::burstSize, {{ 4, 8, 16, 32, 48 }}}, + {SizedItem::ramSizeGB, {{ 8, 12, 16, 24, 32 }}}, }}; // Ensure that the order of entries in the table corresponds to the diff --git a/src/ripple/net/InfoSub.h b/src/ripple/net/InfoSub.h index bc6460ea858..3c170669bab 100644 --- a/src/ripple/net/InfoSub.h +++ b/src/ripple/net/InfoSub.h @@ -33,7 +33,18 @@ namespace ripple { // Operations that clients may wish to perform against the network // Master operational handler, server sequencer, network tracker -class PathRequest; +class InfoSubRequest +{ +public: + using pointer = std::shared_ptr; + + virtual ~InfoSubRequest() = default; + + virtual Json::Value + doClose() = 0; + virtual Json::Value + doStatus(Json::Value const&) = 0; +}; /** Manages a client's subscription to data feeds. */ @@ -205,13 +216,13 @@ class InfoSub : public CountedObject deleteSubAccountHistory(AccountID const& account); void - clearPathRequest(); + clearRequest(); void - setPathRequest(const std::shared_ptr& req); + setRequest(const std::shared_ptr& req); - std::shared_ptr const& - getPathRequest(); + std::shared_ptr const& + getRequest(); protected: std::mutex mLock; @@ -221,7 +232,7 @@ class InfoSub : public CountedObject Source& m_source; hash_set realTimeSubscriptions_; hash_set normalSubscriptions_; - std::shared_ptr mPathRequest; + std::shared_ptr request_; std::uint64_t mSeq; hash_set accountHistorySubscriptions_; diff --git a/src/ripple/net/impl/InfoSub.cpp b/src/ripple/net/impl/InfoSub.cpp index 26849f29ca5..9ea5962fa96 100644 --- a/src/ripple/net/impl/InfoSub.cpp +++ b/src/ripple/net/impl/InfoSub.cpp @@ -119,21 +119,21 @@ InfoSub::deleteSubAccountHistory(AccountID const& account) } void -InfoSub::clearPathRequest() +InfoSub::clearRequest() { - mPathRequest.reset(); + request_.reset(); } void -InfoSub::setPathRequest(const std::shared_ptr& req) +InfoSub::setRequest(const 
std::shared_ptr& req) { - mPathRequest = req; + request_ = req; } -const std::shared_ptr& -InfoSub::getPathRequest() +const std::shared_ptr& +InfoSub::getRequest() { - return mPathRequest; + return request_; } } // namespace ripple diff --git a/src/ripple/nodestore/Database.h b/src/ripple/nodestore/Database.h index f9e8c2418bf..bb9304507d9 100644 --- a/src/ripple/nodestore/Database.h +++ b/src/ripple/nodestore/Database.h @@ -156,7 +156,7 @@ class Database object is stored, used by the shard store. @param callback Callback function when read completes */ - void + virtual void asyncFetch( uint256 const& hash, std::uint32_t ledgerSeq, @@ -366,11 +366,8 @@ class Database std::function const&)>>>> read_; - // last read - uint256 readLastHash_; - - std::vector readThreads_; - bool readStopping_{false}; + std::atomic readStopping_ = false; + std::atomic readThreads_ = 0; virtual std::shared_ptr fetchNodeObject( diff --git a/src/ripple/nodestore/NodeObject.h b/src/ripple/nodestore/NodeObject.h index 2bd73d8dee5..a94e689b34b 100644 --- a/src/ripple/nodestore/NodeObject.h +++ b/src/ripple/nodestore/NodeObject.h @@ -33,7 +33,8 @@ enum NodeObjectType : std::uint32_t { hotUNKNOWN = 0, hotLEDGER = 1, hotACCOUNT_NODE = 3, - hotTRANSACTION_NODE = 4 + hotTRANSACTION_NODE = 4, + hotDUMMY = 512 // an invalid or missing object }; /** A simple object that the Ledger uses to store entries. 
 diff --git a/src/ripple/nodestore/impl/Database.cpp b/src/ripple/nodestore/impl/Database.cpp index da062a682da..bf28f5bfbfb 100644 --- a/src/ripple/nodestore/impl/Database.cpp +++ b/src/ripple/nodestore/impl/Database.cpp @@ -43,15 +43,76 @@ Database::Database( , earliestLedgerSeq_( get(config, "earliest_seq", XRP_LEDGER_EARLIEST_SEQ)) , earliestShardIndex_((earliestLedgerSeq_ - 1) / ledgersPerShard_) + , readThreads_(std::max(1, readThreads)) { + assert(readThreads != 0); + if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0) Throw("Invalid ledgers_per_shard"); if (earliestLedgerSeq_ < 1) Throw("Invalid earliest_seq"); - while (readThreads-- > 0) - readThreads_.emplace_back(&Database::threadEntry, this); + for (int i = 0; i != readThreads_.load(); ++i) + { + std::thread t( + [this](int i) { + beast::setCurrentThreadName( + "db prefetch #" + std::to_string(i)); + + decltype(read_) read; + + while (!isStopping()) + { + { + std::unique_lock lock(readLock_); + + if (read_.empty()) + readCondVar_.wait(lock); + + if (isStopping()) + continue; + + // We extract up to 64 objects to minimize the overhead + // of acquiring the mutex. + for (int cnt = 0; !read_.empty() && cnt != 64; ++cnt) + read.insert(read_.extract(read_.begin())); + } + + for (auto it = read.begin(); it != read.end(); ++it) + { + assert(!it->second.empty()); + + auto const& hash = it->first; + auto const& data = std::move(it->second); + auto const seqn = data[0].first; + + auto obj = + fetchNodeObject(hash, seqn, FetchType::async); + + // This could be further optimized: if there are + // multiple requests for sequence numbers mapping to + // multiple databases by sorting requests such that all + // indices mapping to the same database are grouped + // together and serviced by a single read. + for (auto const& req : data) + { + req.second( + (seqn == req.first) || isSameDB(req.first, seqn) + ? 
obj + : fetchNodeObject( + hash, req.first, FetchType::async)); + } + } + + read.clear(); + } + + --readThreads_; + }, + i); + t.detach(); + } } Database::~Database() @@ -68,8 +129,7 @@ Database::~Database() bool Database::isStopping() const { - std::lock_guard lock(readLock_); - return readStopping_; + return readStopping_.load(std::memory_order_relaxed); } std::uint32_t @@ -88,19 +148,15 @@ Database::maxLedgers(std::uint32_t shardIndex) const noexcept void Database::stop() { - // After stop time we can no longer use the JobQueue for background - // reads. Join the background read threads. + if (!readStopping_.exchange(true, std::memory_order_relaxed)) { std::lock_guard lock(readLock_); - if (readStopping_) // Only stop threads once. - return; - - readStopping_ = true; + read_.clear(); readCondVar_.notify_all(); } - for (auto& e : readThreads_) - e.join(); + while (readThreads_.load() != 0) + std::this_thread::yield(); } void @@ -280,53 +336,6 @@ Database::storeLedger( return true; } -// Entry point for async read threads -void -Database::threadEntry() -{ - beast::setCurrentThreadName("prefetch"); - while (true) - { - uint256 lastHash; - std::vector const&)>>> - entry; - - { - std::unique_lock lock(readLock_); - readCondVar_.wait( - lock, [this] { return readStopping_ || !read_.empty(); }); - if (readStopping_) - break; - - // Read in key order to make the back end more efficient - auto it = read_.lower_bound(readLastHash_); - if (it == read_.end()) - { - // start over from the beginning - it = read_.begin(); - } - lastHash = it->first; - entry = std::move(it->second); - read_.erase(it); - readLastHash_ = lastHash; - } - - auto seq = entry[0].first; - auto obj = fetchNodeObject(lastHash, seq, FetchType::async); - - for (auto const& req : entry) - { - if ((seq == req.first) || isSameDB(req.first, seq)) - req.second(obj); - else - req.second( - fetchNodeObject(lastHash, req.first, FetchType::async)); - } - } -} - void Database::getCountsJson(Json::Value& obj) { diff 
--git a/src/ripple/nodestore/impl/DatabaseNodeImp.cpp b/src/ripple/nodestore/impl/DatabaseNodeImp.cpp index 5de22ccbbb4..9ef878bf3be 100644 --- a/src/ripple/nodestore/impl/DatabaseNodeImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseNodeImp.cpp @@ -33,7 +33,34 @@ DatabaseNodeImp::store( { storeStats(1, data.size()); - backend_->store(NodeObject::createObject(type, std::move(data), hash)); + auto obj = NodeObject::createObject(type, std::move(data), hash); + backend_->store(obj); + if (cache_) + { + // After the store, replace a negative cache entry if there is one + cache_->canonicalize( + hash, obj, [](std::shared_ptr const& n) { + return n->getType() == hotDUMMY; + }); + } +} + +void +DatabaseNodeImp::asyncFetch( + uint256 const& hash, + std::uint32_t ledgerSeq, + std::function const&)>&& callback) +{ + if (cache_) + { + std::shared_ptr obj = cache_->fetch(hash); + if (obj) + { + callback(obj->getType() == hotDUMMY ? nullptr : obj); + return; + } + } + Database::asyncFetch(hash, ledgerSeq, std::move(callback)); } void @@ -75,8 +102,19 @@ DatabaseNodeImp::fetchNodeObject( switch (status) { case ok: - if (nodeObject && cache_) - cache_->canonicalize_replace_client(hash, nodeObject); + if (cache_) + { + if (nodeObject) + cache_->canonicalize_replace_client(hash, nodeObject); + else + { + auto notFound = + NodeObject::createObject(hotDUMMY, {}, hash); + cache_->canonicalize_replace_client(hash, notFound); + if (notFound->getType() != hotDUMMY) + nodeObject = notFound; + } + } break; case notFound: break; @@ -95,6 +133,8 @@ DatabaseNodeImp::fetchNodeObject( { JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record found in cache"; + if (nodeObject->getType() == hotDUMMY) + nodeObject.reset(); } if (nodeObject) @@ -127,7 +167,7 @@ DatabaseNodeImp::fetchBatch(std::vector const& hashes) } else { - results[i] = nObj; + results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj; // It was in the cache. 
++hits; } @@ -140,9 +180,8 @@ DatabaseNodeImp::fetchBatch(std::vector const& hashes) for (size_t i = 0; i < dbResults.size(); ++i) { - auto nObj = dbResults[i]; + auto nObj = std::move(dbResults[i]); size_t index = indexMap[cacheMisses[i]]; - results[index] = nObj; auto const& hash = hashes[index]; if (nObj) @@ -156,7 +195,15 @@ DatabaseNodeImp::fetchBatch(std::vector const& hashes) JLOG(j_.error()) << "fetchBatch - " << "record not found in db or cache. hash = " << strHex(hash); + if (cache_) + { + auto notFound = NodeObject::createObject(hotDUMMY, {}, hash); + cache_->canonicalize_replace_client(hash, notFound); + if (notFound->getType() != hotDUMMY) + nObj = std::move(notFound); + } } + results[index] = std::move(nObj); } auto fetchDurationUs = diff --git a/src/ripple/nodestore/impl/DatabaseNodeImp.h b/src/ripple/nodestore/impl/DatabaseNodeImp.h index 478b3cf6660..452bd8d27fe 100644 --- a/src/ripple/nodestore/impl/DatabaseNodeImp.h +++ b/src/ripple/nodestore/impl/DatabaseNodeImp.h @@ -111,6 +111,7 @@ class DatabaseNodeImp : public Database // only one database return true; } + void sync() override { @@ -120,6 +121,13 @@ class DatabaseNodeImp : public Database std::vector> fetchBatch(std::vector const& hashes); + void + asyncFetch( + uint256 const& hash, + std::uint32_t ledgerSeq, + std::function const&)>&& callback) + override; + bool storeLedger(std::shared_ptr const& srcLedger) override { diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index 6f05328212c..bc379c14725 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -1067,10 +1067,8 @@ PeerImp::onMessage(std::shared_ptr const& m) if (s > 100) fee_ = Resource::feeMediumBurdenPeer; - // VFALCO What's the right job type? 
- auto that = shared_from_this(); app_.getJobQueue().addJob( - jtVALIDATION_ut, "receiveManifests", [this, that, m]() { + jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() { overlay_.onManifests(m, that); }); } @@ -1341,7 +1339,7 @@ PeerImp::onMessage(std::shared_ptr const& m) // case ShardState::finalized: default: return badData("Invalid incomplete shard state"); - }; + } s.add32(incomplete.state()); // Verify progress @@ -1589,17 +1587,18 @@ PeerImp::handleTransaction( } } - if (app_.getJobQueue().getJobCount(jtTRANSACTION) > + if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min) + { + JLOG(p_journal_.trace()) + << "No new transactions until synchronized"; + } + else if ( + app_.getJobQueue().getJobCount(jtTRANSACTION) > app_.config().MAX_TRANSACTIONS) { overlay_.incJqTransOverflow(); JLOG(p_journal_.info()) << "Transaction queue is full"; } - else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min) - { - JLOG(p_journal_.trace()) - << "No new transactions until synchronized"; - } else { app_.getJobQueue().addJob( @@ -2575,6 +2574,7 @@ PeerImp::onMessage(std::shared_ptr const& m) return; auto key = sha512Half(makeSlice(m->validation())); + if (auto [added, relayed] = app_.getHashRouter().addSuppressionPeerWithStatus(key, id_); !added) @@ -2594,22 +2594,36 @@ PeerImp::onMessage(std::shared_ptr const& m) if (!isTrusted && (tracking_.load() == Tracking::diverged)) { JLOG(p_journal_.debug()) - << "Validation: dropping untrusted from diverged peer"; + << "Dropping untrusted validation from diverged peer"; } - if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal()) + else if (isTrusted || !app_.getFeeTrack().isLoadedLocal()) { + std::string const name = [isTrusted, val]() { + std::string ret = + isTrusted ? 
"Trusted validation" : "Untrusted validation"; + +#ifdef DEBUG + ret += " " + + std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " + + to_string(val->getNodeID()); +#endif + + return ret; + }(); + std::weak_ptr weak = shared_from_this(); app_.getJobQueue().addJob( isTrusted ? jtVALIDATION_t : jtVALIDATION_ut, - "recvValidation->checkValidation", - [weak, val, m]() { + name, + [weak, val, m, key]() { if (auto peer = weak.lock()) - peer->checkValidation(val, m); + peer->checkValidation(val, key, m); }); } else { - JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)"; + JLOG(p_journal_.debug()) + << "Dropping untrusted validation for load"; } } catch (std::exception const& e) @@ -3154,12 +3168,13 @@ PeerImp::checkPropose( void PeerImp::checkValidation( std::shared_ptr const& val, + uint256 const& key, std::shared_ptr const& packet) { - if (!cluster() && !val->isValid()) + if (!val->isValid()) { JLOG(p_journal_.debug()) << "Validation forwarded by peer is invalid"; - charge(Resource::feeInvalidRequest); + charge(Resource::feeInvalidSignature); return; } @@ -3169,18 +3184,16 @@ PeerImp::checkValidation( if (app_.getOPs().recvValidation(val, std::to_string(id())) || cluster()) { - auto const suppression = - sha512Half(makeSlice(val->getSerialized())); // haveMessage contains peers, which are suppressed; i.e. the peers // are the source of the message, consequently the message should // not be relayed to these peers. But the message must be counted // as part of the squelch logic. auto haveMessage = - overlay_.relay(*packet, suppression, val->getSignerPublic()); + overlay_.relay(*packet, key, val->getSignerPublic()); if (reduceRelayReady() && !haveMessage.empty()) { overlay_.updateSlotAndSquelch( - suppression, + key, val->getSignerPublic(), std::move(haveMessage), protocol::mtVALIDATION); @@ -3525,8 +3538,8 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) { auto const queryDepth{ m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 
2 : 1)}; - std::vector nodeIds; - std::vector rawNodes; + + std::vector> data; for (int i = 0; i < m->nodeids_size() && ledgerData.nodes_size() < Tuning::softMaxReplyNodes; @@ -3534,30 +3547,22 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) { auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))}; - nodeIds.clear(); - rawNodes.clear(); + data.clear(); + data.reserve(Tuning::softMaxReplyNodes); + try { - if (map->getNodeFat( - *shaMapNodeId, - nodeIds, - rawNodes, - fatLeaves, - queryDepth)) + if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth)) { - assert(nodeIds.size() == rawNodes.size()); JLOG(p_journal_.trace()) << "processLedgerRequest: getNodeFat got " - << rawNodes.size() << " nodes"; + << data.size() << " nodes"; - auto rawNodeIter{rawNodes.begin()}; - for (auto const& nodeId : nodeIds) + for (auto const& d : data) { protocol::TMLedgerNode* node{ledgerData.add_nodes()}; - node->set_nodeid(nodeId.getRawString()); - node->set_nodedata( - &rawNodeIter->front(), rawNodeIter->size()); - ++rawNodeIter; + node->set_nodeid(d.first.getRawString()); + node->set_nodedata(d.second.data(), d.second.size()); } } else @@ -3609,9 +3614,7 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) << ledgerData.nodes_size() << " nodes"; } - auto message{ - std::make_shared(ledgerData, protocol::mtLEDGER_DATA)}; - send(message); + send(std::make_shared(ledgerData, protocol::mtLEDGER_DATA)); } int diff --git a/src/ripple/overlay/impl/PeerImp.h b/src/ripple/overlay/impl/PeerImp.h index 8bed64e724c..710ab4d74d6 100644 --- a/src/ripple/overlay/impl/PeerImp.h +++ b/src/ripple/overlay/impl/PeerImp.h @@ -626,6 +626,7 @@ class PeerImp : public Peer, void checkValidation( std::shared_ptr const& val, + uint256 const& key, std::shared_ptr const& packet); void diff --git a/src/ripple/protocol/Book.h b/src/ripple/protocol/Book.h index 8a0867fe3e2..1469b60dd1b 100644 --- a/src/ripple/protocol/Book.h +++ b/src/ripple/protocol/Book.h @@ -20,6 +20,7 @@ #ifndef 
RIPPLE_PROTOCOL_BOOK_H_INCLUDED #define RIPPLE_PROTOCOL_BOOK_H_INCLUDED +#include #include #include @@ -29,7 +30,7 @@ namespace ripple { The order book is a pair of Issues called in and out. @see Issue. */ -class Book +class Book final : public CountedObject { public: Issue in; diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index f9278ea73fc..28da73436b8 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -341,6 +341,7 @@ extern SF_UINT8 const sfMethod; extern SF_UINT8 const sfTransactionResult; extern SF_UINT8 const sfTickSize; extern SF_UINT8 const sfUNLModifyDisabling; +extern SF_UINT8 const sfHookResult; // 16-bit integers extern SF_UINT16 const sfLedgerEntryType; @@ -349,6 +350,10 @@ extern SF_UINT16 const sfSignerWeight; // 16-bit integers (uncommon) extern SF_UINT16 const sfVersion; +extern SF_UINT16 const sfHookStateChangeCount; +extern SF_UINT16 const sfHookEmitCount; +extern SF_UINT16 const sfHookExecutionIndex; +extern SF_UINT16 const sfHookApiVersion; // 32-bit integers (common) extern SF_UINT32 const sfFlags; @@ -392,6 +397,8 @@ extern SF_UINT32 const sfSignerListID; extern SF_UINT32 const sfSettleDelay; extern SF_UINT32 const sfTicketCount; extern SF_UINT32 const sfTicketSequence; +extern SF_UINT32 const sfHookStateCount; +extern SF_UINT32 const sfEmitGeneration; // 64-bit integers extern SF_UINT64 const sfIndexNext; @@ -405,6 +412,11 @@ extern SF_UINT64 const sfHighNode; extern SF_UINT64 const sfDestinationNode; extern SF_UINT64 const sfCookie; extern SF_UINT64 const sfServerVersion; +extern SF_UINT64 const sfHookOn; +extern SF_UINT64 const sfHookInstructionCount; +extern SF_UINT64 const sfEmitBurden; +extern SF_UINT64 const sfHookReturnCode; +extern SF_UINT64 const sfReferenceCount; // 128-bit extern SF_HASH128 const sfEmailHash; @@ -425,6 +437,9 @@ extern SF_HASH256 const sfLedgerIndex; extern SF_HASH256 const sfWalletLocator; extern SF_HASH256 const sfRootIndex; extern SF_HASH256 const 
sfAccountTxnID; +extern SF_HASH256 const sfEmitParentTxnID; +extern SF_HASH256 const sfEmitNonce; +extern SF_HASH256 const sfEmitHookHash; // 256-bit (uncommon) extern SF_HASH256 const sfBookDirectory; @@ -436,6 +451,10 @@ extern SF_HASH256 const sfChannel; extern SF_HASH256 const sfConsensusHash; extern SF_HASH256 const sfCheckID; extern SF_HASH256 const sfValidatedHash; +extern SF_HASH256 const sfHookStateKey; +extern SF_HASH256 const sfHookHash; +extern SF_HASH256 const sfHookNamespace; +extern SF_HASH256 const sfHookSetTxnID; // currency amount (common) extern SF_AMOUNT const sfAmount; @@ -476,6 +495,10 @@ extern SF_VL const sfMasterSignature; extern SF_VL const sfUNLModifyValidator; extern SF_VL const sfValidatorToDisable; extern SF_VL const sfValidatorToReEnable; +extern SF_VL const sfHookStateData; +extern SF_VL const sfHookReturnString; +extern SF_VL const sfHookParameterName; +extern SF_VL const sfHookParameterValue; // account extern SF_ACCOUNT const sfAccount; @@ -486,6 +509,10 @@ extern SF_ACCOUNT const sfAuthorize; extern SF_ACCOUNT const sfUnauthorize; extern SF_ACCOUNT const sfTarget; extern SF_ACCOUNT const sfRegularKey; +extern SF_ACCOUNT const sfEmitCallback; + +// account (uncommon) +extern SF_ACCOUNT const sfHookAccount; // path set extern SField const sfPaths; @@ -510,6 +537,11 @@ extern SField const sfSignerEntry; extern SField const sfSigner; extern SField const sfMajority; extern SField const sfDisabledValidator; +extern SField const sfEmittedTxn; +extern SField const sfHook; +extern SField const sfHookDefinition; +extern SField const sfHookParameter; +extern SField const sfHookGrant; // array of objects // ARRAY/1 is reserved for end of array @@ -523,6 +555,13 @@ extern SField const sfAffectedNodes; extern SField const sfMemos; extern SField const sfMajorities; extern SField const sfDisabledValidators; +extern SField const sfEmitDetails; +extern SField const sfHookExecutions; +extern SField const sfHookExecution; +extern SField const 
sfHookParameters; +extern SField const sfHooks; +extern SField const sfHookGrants; + //------------------------------------------------------------------------------ } // namespace ripple diff --git a/src/ripple/protocol/STAmount.h b/src/ripple/protocol/STAmount.h index 0f3023aaf8b..d0add30dbba 100644 --- a/src/ripple/protocol/STAmount.h +++ b/src/ripple/protocol/STAmount.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_PROTOCOL_STAMOUNT_H_INCLUDED #define RIPPLE_PROTOCOL_STAMOUNT_H_INCLUDED +#include #include #include #include @@ -40,7 +41,7 @@ namespace ripple { // Wire form: // High 8 bits are (offset+142), legal range is, 80 to 22 inclusive // Low 56 bits are value, legal range is 10^15 to (10^16 - 1) inclusive -class STAmount : public STBase +class STAmount final : public STBase, public CountedObject { public: using mantissa_type = std::uint64_t; diff --git a/src/ripple/protocol/STPathSet.h b/src/ripple/protocol/STPathSet.h index 3ac2c07970a..8102bc76eb0 100644 --- a/src/ripple/protocol/STPathSet.h +++ b/src/ripple/protocol/STPathSet.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_PROTOCOL_STPATHSET_H_INCLUDED #define RIPPLE_PROTOCOL_STPATHSET_H_INCLUDED +#include #include #include #include @@ -30,7 +31,7 @@ namespace ripple { -class STPathElement +class STPathElement final : public CountedObject { unsigned int mType; AccountID mAccountID; @@ -114,7 +115,7 @@ class STPathElement get_hash(STPathElement const& element); }; -class STPath +class STPath final : public CountedObject { std::vector mPath; @@ -172,7 +173,7 @@ class STPath //------------------------------------------------------------------------------ // A set of zero or more payment paths -class STPathSet final : public STBase +class STPathSet final : public STBase, public CountedObject { std::vector value; diff --git a/src/ripple/protocol/TxMeta.h b/src/ripple/protocol/TxMeta.h index 6d61a27e833..0a6578b1930 100644 --- a/src/ripple/protocol/TxMeta.h +++ b/src/ripple/protocol/TxMeta.h @@ -84,7 +84,7 @@ class TxMeta /** Return a 
list of accounts affected by this transaction */ boost::container::flat_set - getAffectedAccounts(beast::Journal j) const; + getAffectedAccounts() const; Json::Value getJson(JsonOptions p) const diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index cb749b4812a..9a261b81f55 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.0-b1" +char const* const versionString = "1.9.0-b2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) diff --git a/src/ripple/protocol/impl/SField.cpp b/src/ripple/protocol/impl/SField.cpp index 590ffeb65a4..679248dea6e 100644 --- a/src/ripple/protocol/impl/SField.cpp +++ b/src/ripple/protocol/impl/SField.cpp @@ -88,6 +88,7 @@ CONSTRUCT_TYPED_SFIELD(sfTransactionResult, "TransactionResult", UINT8, // 8-bit integers (uncommon) CONSTRUCT_TYPED_SFIELD(sfTickSize, "TickSize", UINT8, 16); CONSTRUCT_TYPED_SFIELD(sfUNLModifyDisabling, "UNLModifyDisabling", UINT8, 17); +CONSTRUCT_TYPED_SFIELD(sfHookResult, "HookResult", UINT8, 18); // 16-bit integers CONSTRUCT_TYPED_SFIELD(sfLedgerEntryType, "LedgerEntryType", UINT16, 1, SField::sMD_Never); @@ -96,6 +97,10 @@ CONSTRUCT_TYPED_SFIELD(sfSignerWeight, "SignerWeight", UINT16, // 16-bit integers (uncommon) CONSTRUCT_TYPED_SFIELD(sfVersion, "Version", UINT16, 16); +CONSTRUCT_TYPED_SFIELD(sfHookStateChangeCount, "HookStateChangeCount", UINT16, 17); +CONSTRUCT_TYPED_SFIELD(sfHookEmitCount, "HookEmitCount", UINT16, 18); +CONSTRUCT_TYPED_SFIELD(sfHookExecutionIndex, "HookExecutionIndex", UINT16, 19); +CONSTRUCT_TYPED_SFIELD(sfHookApiVersion, "HookApiVersion", UINT16, 20); // 32-bit integers (common) CONSTRUCT_TYPED_SFIELD(sfFlags, "Flags", UINT32, 2); @@ -139,6 +144,8 @@ 
CONSTRUCT_TYPED_SFIELD(sfSignerListID, "SignerListID", UINT32, CONSTRUCT_TYPED_SFIELD(sfSettleDelay, "SettleDelay", UINT32, 39); CONSTRUCT_TYPED_SFIELD(sfTicketCount, "TicketCount", UINT32, 40); CONSTRUCT_TYPED_SFIELD(sfTicketSequence, "TicketSequence", UINT32, 41); +CONSTRUCT_TYPED_SFIELD(sfHookStateCount, "HookStateCount", UINT32, 45); +CONSTRUCT_TYPED_SFIELD(sfEmitGeneration, "EmitGeneration", UINT32, 46); // 64-bit integers CONSTRUCT_TYPED_SFIELD(sfIndexNext, "IndexNext", UINT64, 1); @@ -152,6 +159,11 @@ CONSTRUCT_TYPED_SFIELD(sfHighNode, "HighNode", UINT64, CONSTRUCT_TYPED_SFIELD(sfDestinationNode, "DestinationNode", UINT64, 9); CONSTRUCT_TYPED_SFIELD(sfCookie, "Cookie", UINT64, 10); CONSTRUCT_TYPED_SFIELD(sfServerVersion, "ServerVersion", UINT64, 11); +CONSTRUCT_TYPED_SFIELD(sfEmitBurden, "EmitBurden", UINT64, 13); +CONSTRUCT_TYPED_SFIELD(sfHookOn, "HookOn", UINT64, 16); +CONSTRUCT_TYPED_SFIELD(sfHookInstructionCount, "HookInstructionCount", UINT64, 17); +CONSTRUCT_TYPED_SFIELD(sfHookReturnCode, "HookReturnCode", UINT64, 18); +CONSTRUCT_TYPED_SFIELD(sfReferenceCount, "ReferenceCount", UINT64, 19); // 128-bit CONSTRUCT_TYPED_SFIELD(sfEmailHash, "EmailHash", HASH128, 1); @@ -172,6 +184,9 @@ CONSTRUCT_TYPED_SFIELD(sfLedgerIndex, "LedgerIndex", HASH256, CONSTRUCT_TYPED_SFIELD(sfWalletLocator, "WalletLocator", HASH256, 7); CONSTRUCT_TYPED_SFIELD(sfRootIndex, "RootIndex", HASH256, 8, SField::sMD_Always); CONSTRUCT_TYPED_SFIELD(sfAccountTxnID, "AccountTxnID", HASH256, 9); +CONSTRUCT_TYPED_SFIELD(sfEmitParentTxnID, "EmitParentTxnID", HASH256, 11); +CONSTRUCT_TYPED_SFIELD(sfEmitNonce, "EmitNonce", HASH256, 12); +CONSTRUCT_TYPED_SFIELD(sfEmitHookHash, "EmitHookHash", HASH256, 13); // 256-bit (uncommon) CONSTRUCT_TYPED_SFIELD(sfBookDirectory, "BookDirectory", HASH256, 16); @@ -184,6 +199,10 @@ CONSTRUCT_TYPED_SFIELD(sfChannel, "Channel", HASH256, CONSTRUCT_TYPED_SFIELD(sfConsensusHash, "ConsensusHash", HASH256, 23); CONSTRUCT_TYPED_SFIELD(sfCheckID, "CheckID", HASH256, 
24); CONSTRUCT_TYPED_SFIELD(sfValidatedHash, "ValidatedHash", HASH256, 25); +CONSTRUCT_TYPED_SFIELD(sfHookStateKey, "HookStateKey", HASH256, 30); +CONSTRUCT_TYPED_SFIELD(sfHookHash, "HookHash", HASH256, 31); +CONSTRUCT_TYPED_SFIELD(sfHookNamespace, "HookNamespace", HASH256, 32); +CONSTRUCT_TYPED_SFIELD(sfHookSetTxnID, "HookSetTxnID", HASH256, 33); // currency amount (common) CONSTRUCT_TYPED_SFIELD(sfAmount, "Amount", AMOUNT, 1); @@ -225,6 +244,10 @@ CONSTRUCT_TYPED_SFIELD(sfMasterSignature, "MasterSignature", VL, CONSTRUCT_TYPED_SFIELD(sfUNLModifyValidator, "UNLModifyValidator", VL, 19); CONSTRUCT_TYPED_SFIELD(sfValidatorToDisable, "ValidatorToDisable", VL, 20); CONSTRUCT_TYPED_SFIELD(sfValidatorToReEnable, "ValidatorToReEnable", VL, 21); +CONSTRUCT_TYPED_SFIELD(sfHookStateData, "HookStateData", VL, 22); +CONSTRUCT_TYPED_SFIELD(sfHookReturnString, "HookReturnString", VL, 23); +CONSTRUCT_TYPED_SFIELD(sfHookParameterName, "HookParameterName", VL, 24); +CONSTRUCT_TYPED_SFIELD(sfHookParameterValue, "HookParameterValue", VL, 25); // account CONSTRUCT_TYPED_SFIELD(sfAccount, "Account", ACCOUNT, 1); @@ -235,6 +258,10 @@ CONSTRUCT_TYPED_SFIELD(sfAuthorize, "Authorize", ACCOUNT, CONSTRUCT_TYPED_SFIELD(sfUnauthorize, "Unauthorize", ACCOUNT, 6); // 7 is currently unused CONSTRUCT_TYPED_SFIELD(sfRegularKey, "RegularKey", ACCOUNT, 8); +CONSTRUCT_TYPED_SFIELD(sfEmitCallback, "EmitCallback", ACCOUNT, 10); + +// account (uncommon) +CONSTRUCT_TYPED_SFIELD(sfHookAccount, "HookAccount", ACCOUNT, 16); // vector of 256-bit CONSTRUCT_TYPED_SFIELD(sfIndexes, "Indexes", VECTOR256, 1, SField::sMD_Never); @@ -256,12 +283,19 @@ CONSTRUCT_UNTYPED_SFIELD(sfNewFields, "NewFields", OBJECT, CONSTRUCT_UNTYPED_SFIELD(sfTemplateEntry, "TemplateEntry", OBJECT, 9); CONSTRUCT_UNTYPED_SFIELD(sfMemo, "Memo", OBJECT, 10); CONSTRUCT_UNTYPED_SFIELD(sfSignerEntry, "SignerEntry", OBJECT, 11); +CONSTRUCT_UNTYPED_SFIELD(sfEmitDetails, "EmitDetails", OBJECT, 13); +CONSTRUCT_UNTYPED_SFIELD(sfHook, "Hook", OBJECT, 
14); // inner object (uncommon) CONSTRUCT_UNTYPED_SFIELD(sfSigner, "Signer", OBJECT, 16); // 17 has not been used yet CONSTRUCT_UNTYPED_SFIELD(sfMajority, "Majority", OBJECT, 18); CONSTRUCT_UNTYPED_SFIELD(sfDisabledValidator, "DisabledValidator", OBJECT, 19); +CONSTRUCT_UNTYPED_SFIELD(sfEmittedTxn, "EmittedTxn", OBJECT, 20); +CONSTRUCT_UNTYPED_SFIELD(sfHookExecution, "HookExecution", OBJECT, 21); +CONSTRUCT_UNTYPED_SFIELD(sfHookDefinition, "HookDefinition", OBJECT, 22); +CONSTRUCT_UNTYPED_SFIELD(sfHookParameter, "HookParameter", OBJECT, 23); +CONSTRUCT_UNTYPED_SFIELD(sfHookGrant, "HookGrant", OBJECT, 24); // array of objects // ARRAY/1 is reserved for end of array @@ -273,10 +307,14 @@ CONSTRUCT_UNTYPED_SFIELD(sfNecessary, "Necessary", ARRAY, CONSTRUCT_UNTYPED_SFIELD(sfSufficient, "Sufficient", ARRAY, 7); CONSTRUCT_UNTYPED_SFIELD(sfAffectedNodes, "AffectedNodes", ARRAY, 8); CONSTRUCT_UNTYPED_SFIELD(sfMemos, "Memos", ARRAY, 9); +CONSTRUCT_UNTYPED_SFIELD(sfHooks, "Hooks", ARRAY, 11); // array of objects (uncommon) CONSTRUCT_UNTYPED_SFIELD(sfMajorities, "Majorities", ARRAY, 16); CONSTRUCT_UNTYPED_SFIELD(sfDisabledValidators, "DisabledValidators", ARRAY, 17); +CONSTRUCT_UNTYPED_SFIELD(sfHookExecutions, "HookExecutions", ARRAY, 18); +CONSTRUCT_UNTYPED_SFIELD(sfHookParameters, "HookParameters", ARRAY, 19); +CONSTRUCT_UNTYPED_SFIELD(sfHookGrants, "HookGrants", ARRAY, 20); // clang-format on diff --git a/src/ripple/protocol/impl/TxMeta.cpp b/src/ripple/protocol/impl/TxMeta.cpp index 6030ff89cde..9e199176515 100644 --- a/src/ripple/protocol/impl/TxMeta.cpp +++ b/src/ripple/protocol/impl/TxMeta.cpp @@ -112,7 +112,7 @@ TxMeta::setAffectedNode( } boost::container::flat_set -TxMeta::getAffectedAccounts(beast::Journal j) const +TxMeta::getAffectedAccounts() const { boost::container::flat_set list; list.reserve(10); @@ -147,6 +147,7 @@ TxMeta::getAffectedAccounts(beast::Journal j) const { const STAmount* lim = dynamic_cast(&field); + assert(lim); if (lim != nullptr) { @@ -155,11 
+156,6 @@ TxMeta::getAffectedAccounts(beast::Journal j) const if (issuer.isNonZero()) list.insert(issuer); } - else - { - JLOG(j.fatal()) << "limit is not amount " - << field.getJson(JsonOptions::none); - } } } } diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index a227af42982..bd9edd02eff 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -41,6 +41,7 @@ namespace jss { error: Common properties of RPC error responses. */ +JSS(AL_size); // out: GetCounts JSS(AL_hit_rate); // out: GetCounts JSS(Account); // in: TransactionSign; field. JSS(AccountDelete); // transaction type. diff --git a/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp b/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp index bf90c66363c..d735e5976f1 100644 --- a/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp +++ b/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp @@ -18,7 +18,7 @@ //============================================================================== #include -#include +#include #include #include #include @@ -58,15 +58,13 @@ doAccountCurrencies(RPC::JsonContext& context) return rpcError(rpcACT_NOT_FOUND); std::set send, receive; - for (auto const& item : getRippleStateItems(accountID, *ledger)) + for (auto const& rspEntry : RPCTrustLine::getItems(accountID, *ledger)) { - auto const rspEntry = item.get(); + STAmount const& saBalance = rspEntry.getBalance(); - STAmount const& saBalance = rspEntry->getBalance(); - - if (saBalance < rspEntry->getLimit()) + if (saBalance < rspEntry.getLimit()) receive.insert(saBalance.getCurrency()); - if ((-saBalance) < rspEntry->getLimitPeer()) + if ((-saBalance) < rspEntry.getLimitPeer()) send.insert(saBalance.getCurrency()); } diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index ee192935856..417a3ffcd38 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -128,8 +128,7 @@ doAccountInfo(RPC::JsonContext& 
context) { Json::Value jvQueueData = Json::objectValue; - auto const txs = - context.app.getTxQ().getAccountTxs(accountID, *ledger); + auto const txs = context.app.getTxQ().getAccountTxs(accountID); if (!txs.empty()) { jvQueueData[jss::txn_count] = @@ -298,7 +297,7 @@ doAccountInfoGrpc( return {result, errorStatus}; } std::vector const txs = - context.app.getTxQ().getAccountTxs(accountID, *ledger); + context.app.getTxQ().getAccountTxs(accountID); org::xrpl::rpc::v1::QueueData& queueData = *result.mutable_queue_data(); RPC::convert(queueData, txs); diff --git a/src/ripple/rpc/handlers/AccountLines.cpp b/src/ripple/rpc/handlers/AccountLines.cpp index 1044dcc7239..843b9ddea56 100644 --- a/src/ripple/rpc/handlers/AccountLines.cpp +++ b/src/ripple/rpc/handlers/AccountLines.cpp @@ -18,7 +18,7 @@ //============================================================================== #include -#include +#include #include #include #include @@ -32,18 +32,17 @@ namespace ripple { struct VisitData { - std::vector items; + std::vector items; AccountID const& accountID; bool hasPeer; AccountID const& raPeerAccount; bool ignoreDefault; uint32_t foundCount; - RippleState::pointer lastFound; }; void -addLine(Json::Value& jsonLines, RippleState const& line) +addLine(Json::Value& jsonLines, RPCTrustLine const& line) { STAmount const& saBalance(line.getBalance()); STAmount const& saLimit(line.getLimit()); @@ -140,7 +139,7 @@ doAccountLines(RPC::JsonContext& context) Json::Value& jsonLines(result[jss::lines] = Json::arrayValue); VisitData visitData = { - {}, accountID, hasPeer, raPeerAccount, ignoreDefault, 0, nullptr}; + {}, accountID, hasPeer, raPeerAccount, ignoreDefault, 0}; uint256 startAfter = beast::zero; std::uint64_t startHint = 0; @@ -194,18 +193,6 @@ doAccountLines(RPC::JsonContext& context) limit + 1, [&visitData, &count, &marker, &limit, &nextHint]( std::shared_ptr const& sleCur) { - bool ignore = false; - if (visitData.ignoreDefault) - { - if 
(sleCur->getFieldAmount(sfLowLimit).getIssuer() == - visitData.accountID) - ignore = - !(sleCur->getFieldU32(sfFlags) & lsfLowReserve); - else - ignore = !( - sleCur->getFieldU32(sfFlags) & lsfHighReserve); - } - if (!sleCur) { assert(false); @@ -219,17 +206,32 @@ doAccountLines(RPC::JsonContext& context) RPC::getStartHint(sleCur, visitData.accountID); } + if (sleCur->getType() != ltRIPPLE_STATE) + return true; + + bool ignore = false; + if (visitData.ignoreDefault) + { + if (sleCur->getFieldAmount(sfLowLimit).getIssuer() == + visitData.accountID) + ignore = + !(sleCur->getFieldU32(sfFlags) & lsfLowReserve); + else + ignore = !( + sleCur->getFieldU32(sfFlags) & lsfHighReserve); + } + if (!ignore && count <= limit) { auto const line = - RippleState::makeItem(visitData.accountID, sleCur); + RPCTrustLine::makeItem(visitData.accountID, sleCur); - if (line != nullptr && + if (line && (!visitData.hasPeer || visitData.raPeerAccount == line->getAccountIDPeer())) { - visitData.items.emplace_back(line); + visitData.items.emplace_back(*line); } } @@ -253,7 +255,7 @@ doAccountLines(RPC::JsonContext& context) result[jss::account] = context.app.accountIDCache().toBase58(accountID); for (auto const& item : visitData.items) - addLine(jsonLines, *item.get()); + addLine(jsonLines, item); context.loadType = Resource::feeMediumBurdenRPC; return result; diff --git a/src/ripple/rpc/handlers/GatewayBalances.cpp b/src/ripple/rpc/handlers/GatewayBalances.cpp index cebf734fa7b..825a74ab843 100644 --- a/src/ripple/rpc/handlers/GatewayBalances.cpp +++ b/src/ripple/rpc/handlers/GatewayBalances.cpp @@ -18,7 +18,7 @@ //============================================================================== #include -#include +#include #include #include #include @@ -144,7 +144,7 @@ doGatewayBalances(RPC::JsonContext& context) { forEachItem( *ledger, accountID, [&](std::shared_ptr const& sle) { - auto rs = RippleState::makeItem(accountID, sle); + auto rs = PathFindTrustLine::makeItem(accountID, sle); if 
(!rs) return; diff --git a/src/ripple/rpc/handlers/GetCounts.cpp b/src/ripple/rpc/handlers/GetCounts.cpp index d59e7014b1a..acb306449df 100644 --- a/src/ripple/rpc/handlers/GetCounts.cpp +++ b/src/ripple/rpc/handlers/GetCounts.cpp @@ -109,6 +109,7 @@ getCountsJson(Application& app, int minObjectCount) static_cast(app.getInboundLedgers().fetchRate()); ret[jss::SLE_hit_rate] = app.cachedSLEs().rate(); ret[jss::ledger_hit_rate] = app.getLedgerMaster().getCacheHitRate(); + ret[jss::AL_size] = Json::UInt(app.getAcceptedLedgerCache().size()); ret[jss::AL_hit_rate] = app.getAcceptedLedgerCache().getHitRate(); ret[jss::fullbelow_size] = diff --git a/src/ripple/rpc/handlers/LedgerAccept.cpp b/src/ripple/rpc/handlers/LedgerAccept.cpp index 77a3c22286d..3a01a3950e1 100644 --- a/src/ripple/rpc/handlers/LedgerAccept.cpp +++ b/src/ripple/rpc/handlers/LedgerAccept.cpp @@ -34,7 +34,6 @@ namespace ripple { Json::Value doLedgerAccept(RPC::JsonContext& context) { - std::unique_lock lock{context.app.getMasterMutex()}; Json::Value jvResult; if (!context.app.config().standalone() || context.app.config().reporting()) @@ -43,8 +42,8 @@ doLedgerAccept(RPC::JsonContext& context) } else { + std::unique_lock lock{context.app.getMasterMutex()}; context.netOps.acceptLedger(); - jvResult[jss::ledger_current_index] = context.ledgerMaster.getCurrentLedgerIndex(); } diff --git a/src/ripple/rpc/handlers/LedgerHandler.cpp b/src/ripple/rpc/handlers/LedgerHandler.cpp index b2e4cb8dd15..e28b181fcba 100644 --- a/src/ripple/rpc/handlers/LedgerHandler.cpp +++ b/src/ripple/rpc/handlers/LedgerHandler.cpp @@ -94,7 +94,7 @@ LedgerHandler::check() return rpcINVALID_PARAMS; } - queueTxs_ = context_.app.getTxQ().getTxs(*ledger_); + queueTxs_ = context_.app.getTxQ().getTxs(); } return Status::OK; diff --git a/src/ripple/rpc/handlers/NoRippleCheck.cpp b/src/ripple/rpc/handlers/NoRippleCheck.cpp index 19b0b8056e9..2a6ab7ca4ed 100644 --- a/src/ripple/rpc/handlers/NoRippleCheck.cpp +++ 
b/src/ripple/rpc/handlers/NoRippleCheck.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/ripple/rpc/handlers/PathFind.cpp b/src/ripple/rpc/handlers/PathFind.cpp index 656744b68cf..9d6e0cff1ac 100644 --- a/src/ripple/rpc/handlers/PathFind.cpp +++ b/src/ripple/rpc/handlers/PathFind.cpp @@ -51,25 +51,25 @@ doPathFind(RPC::JsonContext& context) if (sSubCommand == "create") { context.loadType = Resource::feeHighBurdenRPC; - context.infoSub->clearPathRequest(); + context.infoSub->clearRequest(); return context.app.getPathRequests().makePathRequest( context.infoSub, lpLedger, context.params); } if (sSubCommand == "close") { - PathRequest::pointer request = context.infoSub->getPathRequest(); + InfoSubRequest::pointer request = context.infoSub->getRequest(); if (!request) return rpcError(rpcNO_PF_REQUEST); - context.infoSub->clearPathRequest(); - return request->doClose(context.params); + context.infoSub->clearRequest(); + return request->doClose(); } if (sSubCommand == "status") { - PathRequest::pointer request = context.infoSub->getPathRequest(); + InfoSubRequest::pointer request = context.infoSub->getRequest(); if (!request) return rpcError(rpcNO_PF_REQUEST); diff --git a/src/ripple/rpc/handlers/Stop.cpp b/src/ripple/rpc/handlers/Stop.cpp index dc62ff2ebea..9467556969d 100644 --- a/src/ripple/rpc/handlers/Stop.cpp +++ b/src/ripple/rpc/handlers/Stop.cpp @@ -32,9 +32,7 @@ struct JsonContext; Json::Value doStop(RPC::JsonContext& context) { - std::unique_lock lock{context.app.getMasterMutex()}; context.app.signalStop(); - return RPC::makeObjectValue(systemName() + " server stopping"); } diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 5c42aae969b..c471d2b355a 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git 
a/src/ripple/rpc/impl/TransactionSign.cpp b/src/ripple/rpc/impl/TransactionSign.cpp index 8dff9ee62da..ca24b68740e 100644 --- a/src/ripple/rpc/impl/TransactionSign.cpp +++ b/src/ripple/rpc/impl/TransactionSign.cpp @@ -223,7 +223,8 @@ checkPayment( if (auto ledger = app.openLedger().current()) { Pathfinder pf( - std::make_shared(ledger), + std::make_shared( + ledger, app.journal("RippleLineCache")), srcAddressID, *dstAccountID, sendMax.issue().currency, diff --git a/src/ripple/server/impl/Port.cpp b/src/ripple/server/impl/Port.cpp index 25d86025bda..1b869f6a5da 100644 --- a/src/ripple/server/impl/Port.cpp +++ b/src/ripple/server/impl/Port.cpp @@ -152,7 +152,7 @@ populate( v4Net = boost::asio::ip::make_network_v4(ip); v4 = true; } - catch (boost::system::system_error const& e) + catch (boost::system::system_error const&) { v6Net = boost::asio::ip::make_network_v6(ip); v4 = false; diff --git a/src/ripple/shamap/SHAMap.h b/src/ripple/shamap/SHAMap.h index b913bd5b1d9..1d221179c16 100644 --- a/src/ripple/shamap/SHAMap.h +++ b/src/ripple/shamap/SHAMap.h @@ -238,7 +238,7 @@ class SHAMap void visitDifferences( SHAMap const* have, - std::function) const; + std::function const&) const; /** Visit every leaf node in this SHAMap @@ -267,8 +267,7 @@ class SHAMap bool getNodeFat( SHAMapNodeID const& wanted, - std::vector& nodeIDs, - std::vector& rawNodes, + std::vector>& data, bool fatLeaves, std::uint32_t depth) const; diff --git a/src/ripple/shamap/SHAMapInnerNode.h b/src/ripple/shamap/SHAMapInnerNode.h index db7244e7019..5f0765e9c26 100644 --- a/src/ripple/shamap/SHAMapInnerNode.h +++ b/src/ripple/shamap/SHAMapInnerNode.h @@ -27,8 +27,10 @@ #include #include +#include #include #include +#include #include #include #include @@ -53,7 +55,8 @@ class SHAMapInnerNode final : public SHAMapTreeNode, std::uint32_t fullBelowGen_ = 0; std::uint16_t isBranch_ = 0; - static std::mutex childLock; + /** A bitlock for the children of this node, with one bit per child */ + mutable std::atomic 
lock_ = 0; /** Convert arrays stored in `hashesAndChildren_` so they can store the requested number of children. @@ -155,7 +158,7 @@ class SHAMapInnerNode final : public SHAMapTreeNode, std::shared_ptr getChild(int branch); - virtual std::shared_ptr + std::shared_ptr canonicalizeChild(int branch, std::shared_ptr node); // sync functions diff --git a/src/ripple/shamap/impl/SHAMapInnerNode.cpp b/src/ripple/shamap/impl/SHAMapInnerNode.cpp index 6a2a4504fea..c47ac3864ba 100644 --- a/src/ripple/shamap/impl/SHAMapInnerNode.cpp +++ b/src/ripple/shamap/impl/SHAMapInnerNode.cpp @@ -19,11 +19,9 @@ #include -#include #include #include #include -#include #include #include #include @@ -33,14 +31,88 @@ #include #include -#include #include -#include #include +// This is used for the _mm_pause instruction: +#include + namespace ripple { -std::mutex SHAMapInnerNode::childLock; +/** A specialized 16-way spinlock used to protect inner node branches. + + This class packs 16 separate spinlocks into a single 16-bit value. It makes + it possible to lock any one lock at once or, alternatively, all together. + + The implementation tries to use portable constructs but has to be low-level + for performance. + */ +class SpinBitlock +{ +private: + std::atomic& bits_; + std::uint16_t mask_; + +public: + SpinBitlock(std::atomic& lock) : bits_(lock), mask_(0xFFFF) + { + } + + SpinBitlock(std::atomic& lock, int index) + : bits_(lock), mask_(1 << index) + { + assert(index >= 0 && index < 16); + } + + [[nodiscard]] bool + try_lock() + { + // If we want to grab all the individual bitlocks at once we cannot + // use `fetch_or`! To see why, imagine that `lock_ == 0x0020` which + // means that the `fetch_or` would return `0x0020` but all the bits + // would already be (incorrectly!) set. Oops! 
+ std::uint16_t expected = 0; + + if (mask_ != 0xFFFF) + return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == + expected; + + return bits_.compare_exchange_weak( + expected, + mask_, + std::memory_order_acquire, + std::memory_order_relaxed); + } + + void + lock() + { + // Testing suggests that 99.9999% of the time this will succeed, so + // we try to optimize the fast path. + if (try_lock()) + return; + + do + { + // We try to spin for a few times: + for (int i = 0; i != 100; ++i) + { + if (try_lock()) + return; + + _mm_pause(); + } + + std::this_thread::yield(); + } while ((bits_.load(std::memory_order_relaxed) & mask_) == 0); + } + + void + unlock() + { + bits_.fetch_and(~mask_, std::memory_order_release); + } +}; SHAMapInnerNode::SHAMapInnerNode( std::uint32_t cowid, @@ -108,7 +180,10 @@ SHAMapInnerNode::clone(std::uint32_t cowid) const cloneHashes[branchNum] = thisHashes[indexNum]; }); } - std::lock_guard lock(childLock); + + SpinBitlock sl(lock_); + std::lock_guard lock(sl); + if (thisIsSparse) { int cloneChildIndex = 0; @@ -341,8 +416,11 @@ SHAMapInnerNode::getChildPointer(int branch) assert(branch >= 0 && branch < branchFactor); assert(!isEmptyBranch(branch)); - std::lock_guard lock(childLock); - return hashesAndChildren_.getChildren()[*getChildIndex(branch)].get(); + auto const index = *getChildIndex(branch); + + SpinBitlock sl(lock_, index); + std::lock_guard lock(sl); + return hashesAndChildren_.getChildren()[index].get(); } std::shared_ptr @@ -351,8 +429,11 @@ SHAMapInnerNode::getChild(int branch) assert(branch >= 0 && branch < branchFactor); assert(!isEmptyBranch(branch)); - std::lock_guard lock(childLock); - return hashesAndChildren_.getChildren()[*getChildIndex(branch)]; + auto const index = *getChildIndex(branch); + + SpinBitlock sl(lock_, index); + std::lock_guard lock(sl); + return hashesAndChildren_.getChildren()[index]; } SHAMapHash const& @@ -377,7 +458,9 @@ SHAMapInnerNode::canonicalizeChild( auto [_, hashes, children] = 
hashesAndChildren_.getHashesAndChildren(); assert(node->getHash() == hashes[childIndex]); - std::lock_guard lock(childLock); + SpinBitlock sl(lock_, childIndex); + std::lock_guard lock(sl); + if (children[childIndex]) { // There is already a node hooked up, return it diff --git a/src/ripple/shamap/impl/SHAMapSync.cpp b/src/ripple/shamap/impl/SHAMapSync.cpp index 8cd5bd704ef..1bada85133d 100644 --- a/src/ripple/shamap/impl/SHAMapSync.cpp +++ b/src/ripple/shamap/impl/SHAMapSync.cpp @@ -52,7 +52,7 @@ SHAMap::visitNodes(std::function const& function) const auto node = std::static_pointer_cast(root_); int pos = 0; - while (1) + while (true) { while (pos < 16) { @@ -99,7 +99,7 @@ SHAMap::visitNodes(std::function const& function) const void SHAMap::visitDifferences( SHAMap const* have, - std::function function) const + std::function const& function) const { // Visit every node in this SHAMap that is not present // in the specified SHAMap @@ -426,8 +426,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter) bool SHAMap::getNodeFat( SHAMapNodeID const& wanted, - std::vector& nodeIDs, - std::vector& rawNodes, + std::vector>& data, bool fatLeaves, std::uint32_t depth) const { @@ -443,16 +442,15 @@ SHAMap::getNodeFat( auto inner = static_cast(node); if (inner->isEmptyBranch(branch)) return false; - node = descendThrow(inner, branch); nodeID = nodeID.getChildNodeID(branch); } if (node == nullptr || wanted != nodeID) { - JLOG(journal_.warn()) << "peer requested node that is not in the map:\n" - << wanted << " but found\n" - << nodeID; + JLOG(journal_.info()) + << "peer requested node that is not in the map: " << wanted + << " but found " << nodeID; return false; } @@ -465,18 +463,17 @@ SHAMap::getNodeFat( std::stack> stack; stack.emplace(node, nodeID, depth); + Serializer s(8192); + while (!stack.empty()) { std::tie(node, nodeID, depth) = stack.top(); stack.pop(); - { - // Add this node to the reply - Serializer s; - node->serializeForWire(s); - 
nodeIDs.push_back(nodeID); - rawNodes.push_back(std::move(s.modData())); - } + // Add this node to the reply + s.erase(); + node->serializeForWire(s); + data.emplace_back(std::make_pair(nodeID, s.getData())); if (node->isInner()) { @@ -484,6 +481,7 @@ SHAMap::getNodeFat( // without decrementing the depth auto inner = static_cast(node); int bc = inner->getBranchCount(); + if ((depth > 0) || (bc == 1)) { // We need to process this node's children @@ -492,7 +490,7 @@ SHAMap::getNodeFat( if (!inner->isEmptyBranch(i)) { auto const childNode = descendThrow(inner, i); - SHAMapNodeID const childID = nodeID.getChildNodeID(i); + auto const childID = nodeID.getChildNodeID(i); if (childNode->isInner() && ((depth > 1) || (bc == 1))) { @@ -506,10 +504,10 @@ SHAMap::getNodeFat( else if (childNode->isInner() || fatLeaves) { // Just include this node - Serializer ns; - childNode->serializeForWire(ns); - nodeIDs.push_back(childID); - rawNodes.push_back(std::move(ns.modData())); + s.erase(); + childNode->serializeForWire(s); + data.emplace_back( + std::make_pair(childID, s.getData())); } } } @@ -583,7 +581,6 @@ SHAMap::addKnownNode( } auto const generation = f_.getFullBelowCache(ledgerSeq_)->getGeneration(); - auto newNode = SHAMapTreeNode::makeFromWire(rawNode); SHAMapNodeID iNodeID; auto iNode = root_.get(); @@ -612,6 +609,8 @@ SHAMap::addKnownNode( if (iNode == nullptr) { + auto newNode = SHAMapTreeNode::makeFromWire(rawNode); + if (!newNode || childHash != newNode->getHash()) { JLOG(journal_.warn()) << "Corrupt node received"; diff --git a/src/ripple/shamap/impl/TaggedPointer.ipp b/src/ripple/shamap/impl/TaggedPointer.ipp index d1110f49387..30bf68426b6 100644 --- a/src/ripple/shamap/impl/TaggedPointer.ipp +++ b/src/ripple/shamap/impl/TaggedPointer.ipp @@ -17,9 +17,9 @@ */ //============================================================================== -#include - +#include #include +#include #include diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index 
2ade9e8e307..f3170c9a27b 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -40,6 +40,7 @@ class TxQ1_test : public beast::unit_test::suite { void checkMetrics( + int line, jtx::Env& env, std::size_t expectedCount, std::optional expectedMaxCount, @@ -51,18 +52,81 @@ class TxQ1_test : public beast::unit_test::suite FeeLevel64 const expectedMin{expectedMinFeeLevel}; FeeLevel64 const expectedMed{expectedMedFeeLevel}; auto const metrics = env.app().getTxQ().getMetrics(*env.current()); - BEAST_EXPECT(metrics.referenceFeeLevel == FeeLevel64{256}); - BEAST_EXPECT(metrics.txCount == expectedCount); - BEAST_EXPECT(metrics.txQMaxSize == expectedMaxCount); - BEAST_EXPECT(metrics.txInLedger == expectedInLedger); - BEAST_EXPECT(metrics.txPerLedger == expectedPerLedger); - BEAST_EXPECT(metrics.minProcessingFeeLevel == expectedMin); - BEAST_EXPECT(metrics.medFeeLevel == expectedMed); - auto expectedCurFeeLevel = expectedInLedger > expectedPerLedger + using namespace std::string_literals; + + metrics.referenceFeeLevel == FeeLevel64{256} + ? pass() + : fail( + "reference: "s + + std::to_string(metrics.referenceFeeLevel.value()) + + "/256", + __FILE__, + line); + + metrics.txCount == expectedCount + ? pass() + : fail( + "txCount: "s + std::to_string(metrics.txCount) + "/" + + std::to_string(expectedCount), + __FILE__, + line); + + metrics.txQMaxSize == expectedMaxCount + ? pass() + : fail( + "txQMaxSize: "s + + std::to_string(metrics.txQMaxSize.value_or(0)) + "/" + + std::to_string(expectedMaxCount.value_or(0)), + __FILE__, + line); + + metrics.txInLedger == expectedInLedger + ? pass() + : fail( + "txInLedger: "s + std::to_string(metrics.txInLedger) + "/" + + std::to_string(expectedInLedger), + __FILE__, + line); + + metrics.txPerLedger == expectedPerLedger + ? pass() + : fail( + "txPerLedger: "s + std::to_string(metrics.txPerLedger) + "/" + + std::to_string(expectedPerLedger), + __FILE__, + line); + + metrics.minProcessingFeeLevel == expectedMin + ? 
pass() + : fail( + "minProcessingFeeLevel: "s + + std::to_string(metrics.minProcessingFeeLevel.value()) + + "/" + std::to_string(expectedMin.value()), + __FILE__, + line); + + metrics.medFeeLevel == expectedMed + ? pass() + : fail( + "medFeeLevel: "s + + std::to_string(metrics.medFeeLevel.value()) + "/" + + std::to_string(expectedMed.value()), + __FILE__, + line); + + auto const expectedCurFeeLevel = expectedInLedger > expectedPerLedger ? expectedMed * expectedInLedger * expectedInLedger / (expectedPerLedger * expectedPerLedger) : metrics.referenceFeeLevel; - BEAST_EXPECT(metrics.openLedgerFeeLevel == expectedCurFeeLevel); + + metrics.openLedgerFeeLevel == expectedCurFeeLevel + ? pass() + : fail( + "openLedgerFeeLevel: "s + + std::to_string(metrics.openLedgerFeeLevel.value()) + "/" + + std::to_string(expectedCurFeeLevel.value()), + __FILE__, + line); } void @@ -141,7 +205,7 @@ class TxQ1_test : public beast::unit_test::suite // transactions as though they are ordinary transactions. auto const flagPerLedger = 1 + ripple::detail::numUpVotedAmendments(); auto const flagMaxQueue = ledgersInQueue * flagPerLedger; - checkMetrics(env, 0, flagMaxQueue, 0, flagPerLedger, 256); + checkMetrics(__LINE__, env, 0, flagMaxQueue, 0, flagPerLedger, 256); // Pad a couple of txs with normal fees so the median comes // back down to normal @@ -152,7 +216,7 @@ class TxQ1_test : public beast::unit_test::suite // metrics to reset to defaults, EXCEPT the maxQueue size. 
using namespace std::chrono_literals; env.close(env.now() + 5s, 10000ms); - checkMetrics(env, 0, flagMaxQueue, 0, expectedPerLedger, 256); + checkMetrics(__LINE__, env, 0, flagMaxQueue, 0, expectedPerLedger, 256); auto const fees = env.current()->fees(); BEAST_EXPECT(fees.base == XRPAmount{base}); BEAST_EXPECT(fees.units == FeeUnit64{units}); @@ -186,37 +250,37 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); // Create several accounts while the fee is cheap so they all apply. env.fund(XRP(50000), noripple(alice, bob, charlie, daria)); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); // Alice - price starts exploding: held env(noop(alice), queued); - checkMetrics(env, 1, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3, 256); // Bob with really high fee - applies env(noop(bob), openLedgerFee(env)); - checkMetrics(env, 1, std::nullopt, 5, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3, 256); // Daria with low fee: hold env(noop(daria), fee(1000), queued); - checkMetrics(env, 2, std::nullopt, 5, 3, 256); + checkMetrics(__LINE__, env, 2, std::nullopt, 5, 3, 256); env.close(); // Verify that the held transactions got applied - checkMetrics(env, 0, 10, 2, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 2, 5, 256); ////////////////////////////////////////////////////////////// // Make some more accounts. We'll need them later to abuse the queue. env.fund(XRP(50000), noripple(elmo, fred, gwen, hank)); - checkMetrics(env, 0, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 6, 5, 256); // Now get a bunch of transactions held. 
env(noop(alice), fee(12), queued); - checkMetrics(env, 1, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 1, 10, 6, 5, 256); env(noop(bob), fee(10), queued); // won't clear the queue env(noop(charlie), fee(20), queued); @@ -225,11 +289,11 @@ class TxQ1_test : public beast::unit_test::suite env(noop(fred), fee(19), queued); env(noop(gwen), fee(16), queued); env(noop(hank), fee(18), queued); - checkMetrics(env, 8, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 8, 10, 6, 5, 256); env.close(); // Verify that the held transactions got applied - checkMetrics(env, 1, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 1, 12, 7, 6, 256); // Bob's transaction is still stuck in the queue. @@ -238,45 +302,45 @@ class TxQ1_test : public beast::unit_test::suite // Hank sends another txn env(noop(hank), fee(10), queued); // But he's not going to leave it in the queue - checkMetrics(env, 2, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 2, 12, 7, 6, 256); // Hank sees his txn got held and bumps the fee, // but doesn't even bump it enough to requeue env(noop(hank), fee(11), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(env, 2, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 2, 12, 7, 6, 256); // Hank sees his txn got held and bumps the fee, // enough to requeue, but doesn't bump it enough to // apply to the ledger env(noop(hank), fee(6000), queued); // But he's not going to leave it in the queue - checkMetrics(env, 2, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 2, 12, 7, 6, 256); // Hank sees his txn got held and bumps the fee, // high enough to get into the open ledger, because // he doesn't want to wait. env(noop(hank), openLedgerFee(env)); - checkMetrics(env, 1, 12, 8, 6, 256); + checkMetrics(__LINE__, env, 1, 12, 8, 6, 256); // Hank then sends another, less important txn // (In addition to the metrics, this will verify that // the original txn got removed.) 
env(noop(hank), fee(6000), queued); - checkMetrics(env, 2, 12, 8, 6, 256); + checkMetrics(__LINE__, env, 2, 12, 8, 6, 256); env.close(); // Verify that bob and hank's txns were applied - checkMetrics(env, 0, 16, 2, 8, 256); + checkMetrics(__LINE__, env, 0, 16, 2, 8, 256); // Close again with a simulated time leap to // reset the escalation limit down to minimum env.close(env.now() + 5s, 10000ms); - checkMetrics(env, 0, 16, 0, 3, 256); + checkMetrics(__LINE__, env, 0, 16, 0, 3, 256); // Then close once more without the time leap // to reset the queue maxsize down to minimum env.close(); - checkMetrics(env, 0, 6, 0, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 0, 3, 256); ////////////////////////////////////////////////////////////// @@ -286,7 +350,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(gwen), fee(7000)); env(noop(fred), fee(7000)); env(noop(elmo), fee(7000)); - checkMetrics(env, 0, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 4, 3, 256); // Use explicit fees so we can control which txn // will get dropped @@ -301,7 +365,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), fee(20), queued); // Queue is full now. - checkMetrics(env, 6, 6, 4, 3, 385); + checkMetrics(__LINE__, env, 6, 6, 4, 3, 385); // Try to add another transaction with the default (low) fee, // it should fail because the queue is full. @@ -313,17 +377,17 @@ class TxQ1_test : public beast::unit_test::suite env(noop(charlie), fee(100), queued); // Queue is still full, of course, but the min fee has gone up - checkMetrics(env, 6, 6, 4, 3, 410); + checkMetrics(__LINE__, env, 6, 6, 4, 3, 410); // Close out the ledger, the transactions are accepted, the // queue is cleared, then the localTxs are retried. At this // point, daria's transaction that was dropped from the queue // is put back in. Neat. 
env.close(); - checkMetrics(env, 2, 8, 5, 4, 256, 256 * 700); + checkMetrics(__LINE__, env, 2, 8, 5, 4, 256, 256 * 700); env.close(); - checkMetrics(env, 0, 10, 2, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 2, 5, 256); ////////////////////////////////////////////////////////////// @@ -337,10 +401,10 @@ class TxQ1_test : public beast::unit_test::suite env(noop(daria)); env(pay(alice, iris, XRP(1000)), queued); env(noop(iris), seq(1), fee(20), ter(terNO_ACCOUNT)); - checkMetrics(env, 1, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 1, 10, 6, 5, 256); env.close(); - checkMetrics(env, 0, 12, 1, 6, 256); + checkMetrics(__LINE__, env, 0, 12, 1, 6, 256); env.require(balance(iris, XRP(1000))); BEAST_EXPECT(env.seq(iris) == 11); @@ -366,6 +430,7 @@ class TxQ1_test : public beast::unit_test::suite ++metrics.txCount; checkMetrics( + __LINE__, env, metrics.txCount, metrics.txQMaxSize, @@ -388,14 +453,14 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); // Fund alice and then fill the ledger. 
env.fund(XRP(50000), noripple(alice)); env(noop(alice)); env(noop(alice)); env(noop(alice)); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); ////////////////////////////////////////////////////////////////// @@ -407,11 +472,11 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), ticket::use(tkt1 - 2), ter(tefNO_TICKET)); env(noop(alice), ticket::use(tkt1 - 1), ter(terPRE_TICKET)); env.require(owners(alice, 0), tickets(alice, 0)); - checkMetrics(env, 1, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3, 256); env.close(); env.require(owners(alice, 250), tickets(alice, 250)); - checkMetrics(env, 0, 8, 1, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 1, 4, 256); BEAST_EXPECT(env.seq(alice) == tkt1 + 250); ////////////////////////////////////////////////////////////////// @@ -438,7 +503,7 @@ class TxQ1_test : public beast::unit_test::suite ticket::use(tkt1 + 13), fee(23), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 8, 8, 5, 4, 385); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 385); // Check which of the queued transactions got into the ledger by // attempting to replace them. @@ -470,7 +535,7 @@ class TxQ1_test : public beast::unit_test::suite // the queue. env(noop(alice), ticket::use(tkt1 + 13), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(env, 3, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 3, 10, 6, 5, 256); ////////////////////////////////////////////////////////////////// @@ -501,7 +566,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), seq(nextSeq + 5), queued); env(noop(alice), seq(nextSeq + 6), queued); env(noop(alice), seq(nextSeq + 7), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 10, 10, 6, 5, 257); + checkMetrics(__LINE__, env, 10, 10, 6, 5, 257); // Check which of the queued transactions got into the ledger by // attempting to replace them. 
@@ -529,7 +594,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), seq(nextSeq + 6), ter(telCAN_NOT_QUEUE_FEE)); env(noop(alice), seq(nextSeq + 7), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(env, 4, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 4, 12, 7, 6, 256); BEAST_EXPECT(env.seq(alice) == nextSeq + 4); ////////////////////////////////////////////////////////////////// @@ -560,7 +625,7 @@ class TxQ1_test : public beast::unit_test::suite fee(21), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 10, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 10, 12, 7, 6, 256); env.close(); env.require(owners(alice, 231), tickets(alice, 231)); @@ -591,7 +656,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), seq(nextSeq + 7), ter(telCAN_NOT_QUEUE_FEE)); BEAST_EXPECT(env.seq(alice) == nextSeq + 6); - checkMetrics(env, 6, 14, 8, 7, 256); + checkMetrics(__LINE__, env, 6, 14, 8, 7, 256); ////////////////////////////////////////////////////////////////// @@ -628,7 +693,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), seq(nextSeq + 7), ter(tefPAST_SEQ)); BEAST_EXPECT(env.seq(alice) == nextSeq + 8); - checkMetrics(env, 0, 16, 6, 8, 256); + checkMetrics(__LINE__, env, 0, 16, 6, 8, 256); } void @@ -643,28 +708,28 @@ class TxQ1_test : public beast::unit_test::suite auto gw = Account("gw"); auto USD = gw["USD"]; - checkMetrics(env, 0, std::nullopt, 0, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2, 256); // Create accounts env.fund(XRP(50000), noripple(alice, gw)); - checkMetrics(env, 0, std::nullopt, 2, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 2, 256); env.close(); - checkMetrics(env, 0, 4, 0, 2, 256); + checkMetrics(__LINE__, env, 0, 4, 0, 2, 256); // Alice creates an unfunded offer while the ledger is not full env(offer(alice, XRP(1000), USD(1000)), ter(tecUNFUNDED_OFFER)); - checkMetrics(env, 0, 4, 1, 2, 256); + checkMetrics(__LINE__, env, 0, 4, 1, 2, 256); fillQueue(env, alice); - 
checkMetrics(env, 0, 4, 3, 2, 256); + checkMetrics(__LINE__, env, 0, 4, 3, 2, 256); // Alice creates an unfunded offer that goes in the queue env(offer(alice, XRP(1000), USD(1000)), ter(terQUEUED)); - checkMetrics(env, 1, 4, 3, 2, 256); + checkMetrics(__LINE__, env, 1, 4, 3, 2, 256); // The offer comes out of the queue env.close(); - checkMetrics(env, 0, 6, 1, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 1, 3, 256); } void @@ -684,44 +749,44 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2, 256); // Create several accounts while the fee is cheap so they all apply. env.fund(XRP(50000), noripple(alice, bob, charlie)); - checkMetrics(env, 0, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2, 256); // Future transaction for Alice - fails env(noop(alice), openLedgerFee(env), seq(env.seq(alice) + 1), ter(terPRE_SEQ)); - checkMetrics(env, 0, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2, 256); // Current transaction for Alice: held env(noop(alice), queued); - checkMetrics(env, 1, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2, 256); // Alice - sequence is too far ahead, so won't queue. 
env(noop(alice), seq(env.seq(alice) + 2), ter(telCAN_NOT_QUEUE)); - checkMetrics(env, 1, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2, 256); // Bob with really high fee - applies env(noop(bob), openLedgerFee(env)); - checkMetrics(env, 1, std::nullopt, 4, 2, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 4, 2, 256); // Daria with low fee: hold env(noop(charlie), fee(1000), queued); - checkMetrics(env, 2, std::nullopt, 4, 2, 256); + checkMetrics(__LINE__, env, 2, std::nullopt, 4, 2, 256); // Alice with normal fee: hold env(noop(alice), seq(env.seq(alice) + 1), queued); - checkMetrics(env, 3, std::nullopt, 4, 2, 256); + checkMetrics(__LINE__, env, 3, std::nullopt, 4, 2, 256); env.close(); // Verify that the held transactions got applied // Alice's bad transaction applied from the // Local Txs. - checkMetrics(env, 0, 8, 4, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 4, 4, 256); } void @@ -742,7 +807,7 @@ class TxQ1_test : public beast::unit_test::suite auto queued = ter(terQUEUED); - checkMetrics(env, 0, std::nullopt, 0, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2, 256); // Fund across several ledgers so the TxQ metrics stay restricted. 
env.fund(XRP(1000), noripple(alice, bob)); @@ -752,11 +817,11 @@ class TxQ1_test : public beast::unit_test::suite env.fund(XRP(1000), noripple(edgar, felicia)); env.close(env.now() + 5s, 10000ms); - checkMetrics(env, 0, std::nullopt, 0, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2, 256); env(noop(bob)); env(noop(charlie)); env(noop(daria)); - checkMetrics(env, 0, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2, 256); BEAST_EXPECT(env.current()->info().seq == 6); // Fail to queue an item with a low LastLedgerSeq @@ -771,10 +836,10 @@ class TxQ1_test : public beast::unit_test::suite env(noop(charlie), fee(7000), queued); env(noop(daria), fee(7000), queued); env(noop(edgar), fee(7000), queued); - checkMetrics(env, 5, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 5, std::nullopt, 3, 2, 256); { auto& txQ = env.app().getTxQ(); - auto aliceStat = txQ.getAccountTxs(alice.id(), *env.current()); + auto aliceStat = txQ.getAccountTxs(alice.id()); BEAST_EXPECT(aliceStat.size() == 1); BEAST_EXPECT(aliceStat.begin()->feeLevel == FeeLevel64{256}); BEAST_EXPECT( @@ -782,20 +847,19 @@ class TxQ1_test : public beast::unit_test::suite *aliceStat.begin()->lastValid == 8); BEAST_EXPECT(!aliceStat.begin()->consequences.isBlocker()); - auto bobStat = txQ.getAccountTxs(bob.id(), *env.current()); + auto bobStat = txQ.getAccountTxs(bob.id()); BEAST_EXPECT(bobStat.size() == 1); BEAST_EXPECT( bobStat.begin()->feeLevel == FeeLevel64{7000 * 256 / 10}); BEAST_EXPECT(!bobStat.begin()->lastValid); BEAST_EXPECT(!bobStat.begin()->consequences.isBlocker()); - auto noStat = - txQ.getAccountTxs(Account::master.id(), *env.current()); + auto noStat = txQ.getAccountTxs(Account::master.id()); BEAST_EXPECT(noStat.empty()); } env.close(); - checkMetrics(env, 1, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 1, 6, 4, 3, 256); // Keep alice's transaction waiting. 
env(noop(bob), fee(7000), queued); @@ -803,11 +867,11 @@ class TxQ1_test : public beast::unit_test::suite env(noop(daria), fee(7000), queued); env(noop(edgar), fee(7000), queued); env(noop(felicia), fee(6999), queued); - checkMetrics(env, 6, 6, 4, 3, 257); + checkMetrics(__LINE__, env, 6, 6, 4, 3, 257); env.close(); // alice's transaction is still hanging around - checkMetrics(env, 1, 8, 5, 4, 256, 700 * 256); + checkMetrics(__LINE__, env, 1, 8, 5, 4, 256, 700 * 256); BEAST_EXPECT(env.seq(alice) == 3); // Keep alice's transaction waiting. @@ -818,19 +882,19 @@ class TxQ1_test : public beast::unit_test::suite env(noop(edgar), fee(8000), queued); env(noop(felicia), fee(7999), queued); env(noop(felicia), fee(7999), seq(env.seq(felicia) + 1), queued); - checkMetrics(env, 8, 8, 5, 4, 257, 700 * 256); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 257, 700 * 256); env.close(); // alice's transaction expired without getting // into the ledger, so her transaction is gone, // though one of felicia's is still in the queue. - checkMetrics(env, 1, 10, 6, 5, 256, 700 * 256); + checkMetrics(__LINE__, env, 1, 10, 6, 5, 256, 700 * 256); BEAST_EXPECT(env.seq(alice) == 3); BEAST_EXPECT(env.seq(felicia) == 7); env.close(); // And now the queue is empty - checkMetrics(env, 0, 12, 1, 6, 256, 800 * 256); + checkMetrics(__LINE__, env, 0, 12, 1, 6, 256, 800 * 256); BEAST_EXPECT(env.seq(alice) == 3); BEAST_EXPECT(env.seq(felicia) == 8); } @@ -850,7 +914,7 @@ class TxQ1_test : public beast::unit_test::suite auto queued = ter(terQUEUED); - checkMetrics(env, 0, std::nullopt, 0, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2, 256); // Fund across several ledgers so the TxQ metrics stay restricted. 
env.fund(XRP(1000), noripple(alice, bob)); @@ -862,21 +926,21 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice)); env(noop(alice)); env(noop(alice)); - checkMetrics(env, 0, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2, 256); env(noop(bob), queued); - checkMetrics(env, 1, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2, 256); // Since Alice's queue is empty this blocker can go into her queue. env(regkey(alice, bob), fee(0), queued); - checkMetrics(env, 2, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 2, std::nullopt, 3, 2, 256); // Close out this ledger so we can get a maxsize env.close(); - checkMetrics(env, 0, 6, 2, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 2, 3, 256); fillQueue(env, alice); - checkMetrics(env, 0, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 4, 3, 256); auto feeAlice = 30; auto seqAlice = env.seq(alice); @@ -886,12 +950,12 @@ class TxQ1_test : public beast::unit_test::suite feeAlice = (feeAlice + 1) * 125 / 100; ++seqAlice; } - checkMetrics(env, 4, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 4, 6, 4, 3, 256); // Bob adds a zero fee blocker to his queue. auto const seqBob = env.seq(bob); env(regkey(bob, alice), fee(0), queued); - checkMetrics(env, 5, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 5, 6, 4, 3, 256); // Carol fills the queue. auto feeCarol = feeAlice; @@ -902,7 +966,7 @@ class TxQ1_test : public beast::unit_test::suite feeCarol = (feeCarol + 1) * 125 / 100; ++seqCarol; } - checkMetrics(env, 6, 6, 4, 3, 3 * 256 + 1); + checkMetrics(__LINE__, env, 6, 6, 4, 3, 3 * 256 + 1); // Carol submits high enough to beat Bob's average fee which kicks // out Bob's queued transaction. However Bob's transaction stays @@ -913,20 +977,20 @@ class TxQ1_test : public beast::unit_test::suite env.close(); // Some of Alice's transactions stay in the queue. Bob's // transaction returns to the TxQ. 
- checkMetrics(env, 5, 8, 5, 4, 256); + checkMetrics(__LINE__, env, 5, 8, 5, 4, 256); BEAST_EXPECT(env.seq(alice) == seqAlice - 4); BEAST_EXPECT(env.seq(bob) == seqBob); BEAST_EXPECT(env.seq(carol) == seqCarol + 1); env.close(); // The remaining queued transactions flush through to the ledger. - checkMetrics(env, 0, 10, 5, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 5, 5, 256); BEAST_EXPECT(env.seq(alice) == seqAlice); BEAST_EXPECT(env.seq(bob) == seqBob + 1); BEAST_EXPECT(env.seq(carol) == seqCarol + 1); env.close(); - checkMetrics(env, 0, 10, 0, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 0, 5, 256); BEAST_EXPECT(env.seq(alice) == seqAlice); BEAST_EXPECT(env.seq(bob) == seqBob + 1); BEAST_EXPECT(env.seq(carol) == seqCarol + 1); @@ -969,19 +1033,19 @@ class TxQ1_test : public beast::unit_test::suite auto queued = ter(terQUEUED); - checkMetrics(env, 0, std::nullopt, 0, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2, 256); env.fund(XRP(1000), noripple(alice, bob)); - checkMetrics(env, 0, std::nullopt, 2, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 2, 256); // Fill the ledger env(noop(alice)); - checkMetrics(env, 0, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2, 256); // Put a transaction in the queue env(noop(alice), queued); - checkMetrics(env, 1, std::nullopt, 3, 2, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2, 256); // Now cheat, and bypass the queue. 
{ @@ -999,12 +1063,12 @@ class TxQ1_test : public beast::unit_test::suite }); env.postconditions(jt, ter, didApply); } - checkMetrics(env, 1, std::nullopt, 4, 2, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 4, 2, 256); env.close(); // Alice's queued transaction failed in TxQ::accept // with tefPAST_SEQ - checkMetrics(env, 0, 8, 0, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 0, 4, 256); } void @@ -1028,7 +1092,7 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); // ledgers in queue is 2 because of makeConfig auto const initQueueMax = initFee(env, 3, 2, 10, 10, 200, 50); @@ -1036,11 +1100,11 @@ class TxQ1_test : public beast::unit_test::suite // Create several accounts while the fee is cheap so they all apply. env.fund(drops(2000), noripple(alice)); env.fund(XRP(500000), noripple(bob, charlie, daria)); - checkMetrics(env, 0, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 0, initQueueMax, 4, 3, 256); // Alice - price starts exploding: held env(noop(alice), fee(11), queued); - checkMetrics(env, 1, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 1, initQueueMax, 4, 3, 256); auto aliceSeq = env.seq(alice); auto bobSeq = env.seq(bob); @@ -1048,28 +1112,28 @@ class TxQ1_test : public beast::unit_test::suite // Alice - try to queue a second transaction, but leave a gap env(noop(alice), seq(aliceSeq + 2), fee(100), ter(telCAN_NOT_QUEUE)); - checkMetrics(env, 1, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 1, initQueueMax, 4, 3, 256); // Alice - queue a second transaction. Yay! env(noop(alice), seq(aliceSeq + 1), fee(13), queued); - checkMetrics(env, 2, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 2, initQueueMax, 4, 3, 256); // Alice - queue a third transaction. Yay. 
env(noop(alice), seq(aliceSeq + 2), fee(17), queued); - checkMetrics(env, 3, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 3, initQueueMax, 4, 3, 256); // Bob - queue a transaction env(noop(bob), queued); - checkMetrics(env, 4, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 4, initQueueMax, 4, 3, 256); // Bob - queue a second transaction env(noop(bob), seq(bobSeq + 1), fee(50), queued); - checkMetrics(env, 5, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 5, initQueueMax, 4, 3, 256); // Charlie - queue a transaction, with a higher fee // than default env(noop(charlie), fee(15), queued); - checkMetrics(env, 6, initQueueMax, 4, 3, 256); + checkMetrics(__LINE__, env, 6, initQueueMax, 4, 3, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq); BEAST_EXPECT(env.seq(bob) == bobSeq); @@ -1078,7 +1142,7 @@ class TxQ1_test : public beast::unit_test::suite env.close(); // Verify that all of but one of the queued transactions // got applied. - checkMetrics(env, 1, 8, 5, 4, 256); + checkMetrics(__LINE__, env, 1, 8, 5, 4, 256); // Verify that the stuck transaction is Bob's second. // Even though it had a higher fee than Alice's and @@ -1100,10 +1164,10 @@ class TxQ1_test : public beast::unit_test::suite queued); ++aliceSeq; } - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 513); { auto& txQ = env.app().getTxQ(); - auto aliceStat = txQ.getAccountTxs(alice.id(), *env.current()); + auto aliceStat = txQ.getAccountTxs(alice.id()); aliceFee = 27; auto const& baseFee = env.current()->fees().base; auto seq = env.seq(alice); @@ -1131,24 +1195,24 @@ class TxQ1_test : public beast::unit_test::suite json(jss::LastLedgerSequence, lastLedgerSeq + 7), fee(aliceFee), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 513); // Charlie - try to add another item to the queue, // which fails because fee is lower than Alice's // queued average. 
env(noop(charlie), fee(19), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 513); // Charlie - add another item to the queue, which // causes Alice's last txn to drop env(noop(charlie), fee(30), queued); - checkMetrics(env, 8, 8, 5, 4, 538); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); // Alice - now attempt to add one more to the queue, // which fails because the last tx was dropped, so // there is no complete chain. env(noop(alice), seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE)); - checkMetrics(env, 8, 8, 5, 4, 538); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); // Alice wants this tx more than the dropped tx, // so resubmits with higher fee, but the queue @@ -1157,7 +1221,7 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq - 1), fee(aliceFee), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 8, 8, 5, 4, 538); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); // Try to replace a middle item in the queue // without enough fee. @@ -1167,18 +1231,18 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(env, 8, 8, 5, 4, 538); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); // Replace a middle item from the queue successfully ++aliceFee; env(noop(alice), seq(aliceSeq), fee(aliceFee), queued); - checkMetrics(env, 8, 8, 5, 4, 538); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); env.close(); // Alice's transactions processed, along with // Charlie's, and the lost one is replayed and // added back to the queue. - checkMetrics(env, 4, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 4, 10, 6, 5, 256); aliceSeq = env.seq(alice) + 1; @@ -1192,18 +1256,18 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(env, 4, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 4, 10, 6, 5, 256); // Try to spend more than Alice can afford with all the other txs. 
aliceSeq += 2; env(noop(alice), seq(aliceSeq), fee(aliceFee), ter(terINSUF_FEE_B)); - checkMetrics(env, 4, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 4, 10, 6, 5, 256); // Replace the last queued item with a transaction that will // bankrupt Alice --aliceFee; env(noop(alice), seq(aliceSeq), fee(aliceFee), queued); - checkMetrics(env, 4, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 4, 10, 6, 5, 256); // Alice - Attempt to queue a last transaction, but it // fails because the fee in flight is too high, before @@ -1214,14 +1278,14 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(env, 4, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 4, 10, 6, 5, 256); env.close(); // All of Alice's transactions applied. - checkMetrics(env, 0, 12, 4, 6, 256); + checkMetrics(__LINE__, env, 0, 12, 4, 6, 256); env.close(); - checkMetrics(env, 0, 12, 0, 6, 256); + checkMetrics(__LINE__, env, 0, 12, 0, 6, 256); // Alice is broke env.require(balance(alice, XRP(0))); @@ -1231,17 +1295,17 @@ class TxQ1_test : public beast::unit_test::suite // account limit (10) txs. 
fillQueue(env, bob); bobSeq = env.seq(bob); - checkMetrics(env, 0, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 0, 12, 7, 6, 256); for (int i = 0; i < 10; ++i) env(noop(bob), seq(bobSeq + i), queued); - checkMetrics(env, 10, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 10, 12, 7, 6, 256); // Bob hit the single account limit env(noop(bob), seq(bobSeq + 10), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 10, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 10, 12, 7, 6, 256); // Bob can replace one of the earlier txs regardless // of the limit env(noop(bob), seq(bobSeq + 5), fee(20), queued); - checkMetrics(env, 10, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 10, 12, 7, 6, 256); // Try to replace a middle item in the queue // with enough fee to bankrupt bob and make the @@ -1252,7 +1316,7 @@ class TxQ1_test : public beast::unit_test::suite seq(bobSeq + 5), fee(bobFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(env, 10, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 10, 12, 7, 6, 256); // Attempt to replace a middle item in the queue with enough fee // to bankrupt bob, and also to use fee averaging to clear out the @@ -1266,14 +1330,14 @@ class TxQ1_test : public beast::unit_test::suite seq(bobSeq + 5), fee(bobFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(env, 10, 12, 7, 6, 256); + checkMetrics(__LINE__, env, 10, 12, 7, 6, 256); // Close the ledger and verify that the queued transactions succeed // and bob has the right ending balance. 
env.close(); - checkMetrics(env, 3, 14, 8, 7, 256); + checkMetrics(__LINE__, env, 3, 14, 8, 7, 256); env.close(); - checkMetrics(env, 0, 16, 3, 8, 256); + checkMetrics(__LINE__, env, 0, 16, 3, 8, 256); env.require(balance(bob, drops(499'999'999'750))); } @@ -1299,20 +1363,20 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 4, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 4, 256); // Create several accounts while the fee is cheap so they all apply. env.fund(XRP(50000), noripple(alice, bob, charlie, daria)); - checkMetrics(env, 0, std::nullopt, 4, 4, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 4, 256); env.close(); - checkMetrics(env, 0, 8, 0, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 0, 4, 256); env.fund(XRP(50000), noripple(elmo, fred, gwen, hank)); - checkMetrics(env, 0, 8, 4, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 4, 4, 256); env.close(); - checkMetrics(env, 0, 8, 0, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 0, 4, 256); ////////////////////////////////////////////////////////////// @@ -1323,7 +1387,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(gwen)); env(noop(fred)); env(noop(elmo)); - checkMetrics(env, 0, 8, 5, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 5, 4, 256); auto aliceSeq = env.seq(alice); auto bobSeq = env.seq(bob); @@ -1336,8 +1400,8 @@ class TxQ1_test : public beast::unit_test::suite // This time, use identical fees. - // This one gets into the queue, but gets dropped when the - // higher fee one is added later. + // All of these get into the queue, but one gets dropped when the + // higher fee one is added later. Which one depends on ordering. env(noop(alice), fee(15), queued); env(noop(bob), fee(15), queued); env(noop(charlie), fee(15), queued); @@ -1349,7 +1413,7 @@ class TxQ1_test : public beast::unit_test::suite // Queue is full now. Minimum fee now reflects the // lowest fee in the queue. 
- checkMetrics(env, 8, 8, 5, 4, 385); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 385); // Try to add another transaction with the default (low) fee, // it should fail because it can't replace the one already @@ -1362,60 +1426,165 @@ class TxQ1_test : public beast::unit_test::suite env(noop(charlie), fee(100), seq(charlieSeq + 1), queued); // Queue is still full. - checkMetrics(env, 8, 8, 5, 4, 385); + checkMetrics(__LINE__, env, 8, 8, 5, 4, 385); - // bob, charlie, daria, elmo, and fred's txs - // are processed out of the queue into the ledger, - // leaving fred and hank's txs. alice's tx is - // retried from localTxs, and put back into the - // queue. + // Six txs are processed out of the queue into the ledger, + // leaving two txs. The dropped tx is retried from localTxs, and + // put back into the queue. env.close(); - checkMetrics(env, 3, 10, 6, 5, 256); - - BEAST_EXPECT(aliceSeq + 1 == env.seq(alice)); - BEAST_EXPECT(bobSeq + 1 == env.seq(bob)); - BEAST_EXPECT(charlieSeq == env.seq(charlie)); - BEAST_EXPECT(dariaSeq + 1 == env.seq(daria)); - BEAST_EXPECT(elmoSeq == env.seq(elmo)); - BEAST_EXPECT(fredSeq + 1 == env.seq(fred)); - BEAST_EXPECT(gwenSeq + 1 == env.seq(gwen)); - BEAST_EXPECT(hankSeq + 1 == env.seq(hank)); - - aliceSeq = env.seq(alice); - bobSeq = env.seq(bob); - charlieSeq = env.seq(charlie); - dariaSeq = env.seq(daria); - elmoSeq = env.seq(elmo); - fredSeq = env.seq(fred); + checkMetrics(__LINE__, env, 3, 10, 6, 5, 256); + + // This next test should remain unchanged regardless of + // transaction ordering + BEAST_EXPECT( + aliceSeq + bobSeq + charlieSeq + dariaSeq + elmoSeq + fredSeq + + gwenSeq + hankSeq + 6 == + env.seq(alice) + env.seq(bob) + env.seq(charlie) + env.seq(daria) + + env.seq(elmo) + env.seq(fred) + env.seq(gwen) + env.seq(hank)); + // These tests may change if TxQ ordering is changed + using namespace std::string_literals; + BEAST_EXPECTS( + aliceSeq + 1 == env.seq(alice), + "alice: "s + std::to_string(aliceSeq) + ", " + + 
std::to_string(env.seq(alice))); + BEAST_EXPECTS( + bobSeq + 1 == env.seq(bob), + "bob: "s + std::to_string(bobSeq) + ", " + + std::to_string(env.seq(bob))); + BEAST_EXPECTS( + charlieSeq + 2 == env.seq(charlie), + "charlie: "s + std::to_string(charlieSeq) + ", " + + std::to_string(env.seq(charlie))); + BEAST_EXPECTS( + dariaSeq == env.seq(daria), + "daria: "s + std::to_string(dariaSeq) + ", " + + std::to_string(env.seq(daria))); + BEAST_EXPECTS( + elmoSeq + 1 == env.seq(elmo), + "elmo: "s + std::to_string(elmoSeq) + ", " + + std::to_string(env.seq(elmo))); + BEAST_EXPECTS( + fredSeq == env.seq(fred), + "fred: "s + std::to_string(fredSeq) + ", " + + std::to_string(env.seq(fred))); + BEAST_EXPECTS( + gwenSeq + 1 == env.seq(gwen), + "gwen: "s + std::to_string(gwenSeq) + ", " + + std::to_string(env.seq(gwen))); + BEAST_EXPECTS( + hankSeq == env.seq(hank), + "hank: "s + std::to_string(hankSeq) + ", " + + std::to_string(env.seq(hank))); + + // Which sequences get incremented may change if TxQ ordering is + // changed + ++aliceSeq; + ++bobSeq; + ++(++charlieSeq); + // ++dariaSeq; + ++elmoSeq; + // ++fredSeq; + ++gwenSeq; + // ++hankSeq; + + auto getTxsQueued = [&]() { + auto const txs = env.app().getTxQ().getTxs(); + std::map result; + for (auto const& tx : txs) + { + ++result[tx.txn->at(sfAccount)]; + } + return result; + }; + auto qTxCount1 = getTxsQueued(); + BEAST_EXPECT(qTxCount1.size() <= 3); // Fill up the queue again - env(noop(fred), fee(15), queued); - env(noop(fred), seq(fredSeq + 1), fee(15), queued); - env(noop(fred), seq(fredSeq + 2), fee(15), queued); - env(noop(bob), fee(15), queued); - env(noop(charlie), seq(charlieSeq + 2), fee(15), queued); - env(noop(daria), fee(15), queued); - // This one gets into the queue, but gets dropped when the - // higher fee one is added later. 
- env(noop(elmo), seq(elmoSeq + 1), fee(15), queued); - checkMetrics(env, 10, 10, 6, 5, 385); + env(noop(alice), + seq(aliceSeq + qTxCount1[alice.id()]++), + fee(15), + queued); + env(noop(bob), seq(bobSeq + qTxCount1[bob.id()]++), fee(15), queued); + env(noop(charlie), + seq(charlieSeq + qTxCount1[charlie.id()]++), + fee(15), + queued); + env(noop(daria), + seq(dariaSeq + qTxCount1[daria.id()]++), + fee(15), + queued); + env(noop(elmo), seq(elmoSeq + qTxCount1[elmo.id()]++), fee(15), queued); + env(noop(fred), seq(fredSeq + qTxCount1[fred.id()]++), fee(15), queued); + env(noop(gwen), seq(gwenSeq + qTxCount1[gwen.id()]++), fee(15), queued); + checkMetrics(__LINE__, env, 10, 10, 6, 5, 385); // Add another transaction, with a higher fee, // Not high enough to get into the ledger, but high // enough to get into the queue (and kick somebody out) - env(noop(fred), fee(100), seq(fredSeq + 3), queued); + env(noop(alice), + fee(100), + seq(aliceSeq + qTxCount1[alice.id()]++), + queued); + checkMetrics(__LINE__, env, 10, 10, 6, 5, 385); + + // Seven txs are processed out of the queue, leaving 3. One + // dropped tx is retried from localTxs, and put back into the + // queue. 
env.close(); - checkMetrics(env, 4, 12, 7, 6, 256); - - BEAST_EXPECT(fredSeq + 4 == env.seq(fred)); - BEAST_EXPECT(gwenSeq + 1 == env.seq(gwen)); - BEAST_EXPECT(hankSeq + 1 == env.seq(hank)); - BEAST_EXPECT(aliceSeq == env.seq(alice)); - BEAST_EXPECT(bobSeq + 1 == env.seq(bob)); - BEAST_EXPECT(charlieSeq + 2 == env.seq(charlie)); - BEAST_EXPECT(dariaSeq == env.seq(daria)); - BEAST_EXPECT(elmoSeq == env.seq(elmo)); + checkMetrics(__LINE__, env, 4, 12, 7, 6, 256); + + // Refresh the queue counts + auto qTxCount2 = getTxsQueued(); + BEAST_EXPECT(qTxCount2.size() <= 4); + + // This next test should remain unchanged regardless of + // transaction ordering + BEAST_EXPECT( + aliceSeq + bobSeq + charlieSeq + dariaSeq + elmoSeq + fredSeq + + gwenSeq + hankSeq + 7 == + env.seq(alice) + env.seq(bob) + env.seq(charlie) + env.seq(daria) + + env.seq(elmo) + env.seq(fred) + env.seq(gwen) + env.seq(hank)); + // These tests may change if TxQ ordering is changed + BEAST_EXPECTS( + aliceSeq + qTxCount1[alice.id()] - qTxCount2[alice.id()] == + env.seq(alice), + "alice: "s + std::to_string(aliceSeq) + ", " + + std::to_string(env.seq(alice))); + BEAST_EXPECTS( + bobSeq + qTxCount1[bob.id()] - qTxCount2[bob.id()] == env.seq(bob), + "bob: "s + std::to_string(bobSeq) + ", " + + std::to_string(env.seq(bob))); + BEAST_EXPECTS( + charlieSeq + qTxCount1[charlie.id()] - qTxCount2[charlie.id()] == + env.seq(charlie), + "charlie: "s + std::to_string(charlieSeq) + ", " + + std::to_string(env.seq(charlie))); + BEAST_EXPECTS( + dariaSeq + qTxCount1[daria.id()] - qTxCount2[daria.id()] == + env.seq(daria), + "daria: "s + std::to_string(dariaSeq) + ", " + + std::to_string(env.seq(daria))); + BEAST_EXPECTS( + elmoSeq + qTxCount1[elmo.id()] - qTxCount2[elmo.id()] == + env.seq(elmo), + "elmo: "s + std::to_string(elmoSeq) + ", " + + std::to_string(env.seq(elmo))); + BEAST_EXPECTS( + fredSeq + qTxCount1[fred.id()] - qTxCount2[fred.id()] == + env.seq(fred), + "fred: "s + std::to_string(fredSeq) + ", " + + 
std::to_string(env.seq(fred))); + BEAST_EXPECTS( + gwenSeq + qTxCount1[gwen.id()] - qTxCount2[gwen.id()] == + env.seq(gwen), + "gwen: "s + std::to_string(gwenSeq) + ", " + + std::to_string(env.seq(gwen))); + BEAST_EXPECTS( + hankSeq + qTxCount1[hank.id()] - qTxCount2[hank.id()] == + env.seq(hank), + "hank: "s + std::to_string(hankSeq) + ", " + + std::to_string(env.seq(hank))); } void @@ -1430,13 +1599,13 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 1, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 1, 256); env.fund(XRP(50000), noripple(alice)); - checkMetrics(env, 0, std::nullopt, 1, 1, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 1, 1, 256); env(fset(alice, asfAccountTxnID)); - checkMetrics(env, 0, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1, 256); // Immediately after the fset, the sfAccountTxnID field // is still uninitialized, so preflight succeeds here, @@ -1445,14 +1614,14 @@ class TxQ1_test : public beast::unit_test::suite json(R"({"AccountTxnID": "0"})"), ter(telCAN_NOT_QUEUE)); - checkMetrics(env, 0, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1, 256); env.close(); // The failed transaction is retried from LocalTx // and succeeds. 
- checkMetrics(env, 0, 4, 1, 2, 256); + checkMetrics(__LINE__, env, 0, 4, 1, 2, 256); env(noop(alice)); - checkMetrics(env, 0, 4, 2, 2, 256); + checkMetrics(__LINE__, env, 0, 4, 2, 2, 256); env(noop(alice), json(R"({"AccountTxnID": "0"})"), ter(tefWRONG_PRIOR)); } @@ -1475,19 +1644,19 @@ class TxQ1_test : public beast::unit_test::suite auto alice = Account("alice"); - checkMetrics(env, 0, std::nullopt, 0, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2, 256); env.fund(XRP(50000), noripple(alice)); - checkMetrics(env, 0, std::nullopt, 1, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 1, 2, 256); for (int i = 0; i < 10; ++i) env(noop(alice), openLedgerFee(env)); - checkMetrics(env, 0, std::nullopt, 11, 2, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 11, 2, 256); env.close(); // If not for the maximum, the per ledger would be 11. - checkMetrics(env, 0, 10, 0, 5, 256, 800025); + checkMetrics(__LINE__, env, 0, 10, 0, 5, 256, 800025); } try @@ -1577,22 +1746,22 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, initQueueMax, 0, 3, 256); + checkMetrics(__LINE__, env, 0, initQueueMax, 0, 3, 256); env.fund(drops(5000), noripple(alice)); env.fund(XRP(50000), noripple(bob)); - checkMetrics(env, 0, initQueueMax, 2, 3, 256); + checkMetrics(__LINE__, env, 0, initQueueMax, 2, 3, 256); auto USD = bob["USD"]; env(offer(alice, USD(5000), drops(5000)), require(owners(alice, 1))); - checkMetrics(env, 0, initQueueMax, 3, 3, 256); + checkMetrics(__LINE__, env, 0, initQueueMax, 3, 3, 256); env.close(); - checkMetrics(env, 0, 6, 0, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 0, 3, 256); // Fill up the ledger fillQueue(env, alice); - checkMetrics(env, 0, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 4, 3, 256); // Queue up a couple of transactions, plus one // more expensive one. 
@@ -1601,7 +1770,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), seq(aliceSeq++), queued); env(noop(alice), seq(aliceSeq++), queued); env(noop(alice), fee(drops(1000)), seq(aliceSeq), queued); - checkMetrics(env, 4, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 4, 6, 4, 3, 256); // This offer should take Alice's offer // up to Alice's reserve. @@ -1609,7 +1778,7 @@ class TxQ1_test : public beast::unit_test::suite openLedgerFee(env), require( balance(alice, drops(250)), owners(alice, 1), lines(alice, 1))); - checkMetrics(env, 4, 6, 5, 3, 256); + checkMetrics(__LINE__, env, 4, 6, 5, 3, 256); // Try adding a new transaction. // Too many fees in flight. @@ -1617,12 +1786,12 @@ class TxQ1_test : public beast::unit_test::suite fee(drops(200)), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(env, 4, 6, 5, 3, 256); + checkMetrics(__LINE__, env, 4, 6, 5, 3, 256); // Close the ledger. All of Alice's transactions // take a fee, except the last one. env.close(); - checkMetrics(env, 1, 10, 3, 5, 256); + checkMetrics(__LINE__, env, 1, 10, 3, 5, 256); env.require(balance(alice, drops(250 - 30))); // Still can't add a new transaction for Alice, @@ -1631,7 +1800,7 @@ class TxQ1_test : public beast::unit_test::suite fee(drops(200)), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(env, 1, 10, 3, 5, 256); + checkMetrics(__LINE__, env, 1, 10, 3, 5, 256); /* At this point, Alice's transaction is indefinitely stuck in the queue. Eventually it will either @@ -1643,13 +1812,13 @@ class TxQ1_test : public beast::unit_test::suite for (int i = 0; i < 9; ++i) { env.close(); - checkMetrics(env, 1, 10, 0, 5, 256); + checkMetrics(__LINE__, env, 1, 10, 0, 5, 256); } // And Alice's transaction expires (via the retry limit, // not LastLedgerSequence). 
env.close(); - checkMetrics(env, 0, 10, 0, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 0, 5, 256); } void @@ -1669,11 +1838,11 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); env.fund(XRP(50000), noripple(alice, bob)); env.memoize(charlie); - checkMetrics(env, 0, std::nullopt, 2, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 3, 256); { // Cannot put a blocker in an account's queue if that queue // already holds two or more (non-blocker) entries. @@ -1682,7 +1851,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice)); // Set a regular key just to clear the password spent flag env(regkey(alice, charlie)); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); // Put two "normal" txs in the queue auto const aliceSeq = env.seq(alice); @@ -1708,11 +1877,11 @@ class TxQ1_test : public beast::unit_test::suite // Other accounts are not affected env(noop(bob), queued); - checkMetrics(env, 3, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 3, std::nullopt, 4, 3, 256); // Drain the queue. env.close(); - checkMetrics(env, 0, 8, 4, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 4, 4, 256); } { // Replace a lone non-blocking tx with a blocker. @@ -1750,7 +1919,7 @@ class TxQ1_test : public beast::unit_test::suite // Drain the queue. env.close(); - checkMetrics(env, 0, 10, 3, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 3, 5, 256); } { // Put a blocker in an empty queue. @@ -1778,7 +1947,7 @@ class TxQ1_test : public beast::unit_test::suite // Drain the queue. 
env.close(); - checkMetrics(env, 0, 12, 3, 6, 256); + checkMetrics(__LINE__, env, 0, 12, 3, 6, 256); } } @@ -1799,12 +1968,12 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); env.fund(XRP(50000), noripple(alice, bob)); env.memoize(charlie); - checkMetrics(env, 0, std::nullopt, 2, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 3, 256); std::uint32_t tkt{env.seq(alice) + 1}; { @@ -1815,7 +1984,7 @@ class TxQ1_test : public beast::unit_test::suite env(ticket::create(alice, 250), seq(tkt - 1)); // Set a regular key just to clear the password spent flag env(regkey(alice, charlie)); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); // Put two "normal" txs in the queue auto const aliceSeq = env.seq(alice); @@ -1845,11 +2014,11 @@ class TxQ1_test : public beast::unit_test::suite // Other accounts are not affected env(noop(bob), queued); - checkMetrics(env, 3, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 3, std::nullopt, 4, 3, 256); // Drain the queue and local transactions. env.close(); - checkMetrics(env, 0, 8, 5, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 5, 4, 256); // Show that the local transactions have flushed through as well. BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -1906,7 +2075,7 @@ class TxQ1_test : public beast::unit_test::suite // Drain the queue. env.close(); - checkMetrics(env, 0, 10, 4, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 4, 5, 256); // Show that the local transactions have flushed through as well. BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -1940,7 +2109,7 @@ class TxQ1_test : public beast::unit_test::suite // Drain the queue. 
env.close(); - checkMetrics(env, 0, 12, 3, 6, 256); + checkMetrics(__LINE__, env, 0, 12, 3, 6, 256); } } @@ -1972,10 +2141,10 @@ class TxQ1_test : public beast::unit_test::suite auto limit = 3; - checkMetrics(env, 0, initQueueMax, 0, limit, 256); + checkMetrics(__LINE__, env, 0, initQueueMax, 0, limit, 256); env.fund(XRP(50000), noripple(alice, charlie), gw); - checkMetrics(env, 0, initQueueMax, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, initQueueMax, limit + 1, limit, 256); auto USD = gw["USD"]; auto BUX = gw["BUX"]; @@ -1990,16 +2159,16 @@ class TxQ1_test : public beast::unit_test::suite // If this offer crosses, all of alice's // XRP will be taken (except the reserve). env(offer(alice, BUX(5000), XRP(50000)), queued); - checkMetrics(env, 1, initQueueMax, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 1, initQueueMax, limit + 1, limit, 256); // But because the reserve is protected, another // transaction will be allowed to queue env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, initQueueMax, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, initQueueMax, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2011,7 +2180,7 @@ class TxQ1_test : public beast::unit_test::suite ////////////////////////////////////////// // Offer with high XRP out and high total fee blocks later txs fillQueue(env, alice); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2019,12 +2188,12 @@ class TxQ1_test : public beast::unit_test::suite // Alice creates an offer with a fee of half the reserve env(offer(alice, BUX(5000), XRP(50000)), fee(drops(100)), queued); - checkMetrics(env, 1, limit * 2, limit + 1, 
limit, 256); + checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit, 256); // Alice creates another offer with a fee // that brings the total to just shy of the reserve env(noop(alice), fee(drops(99)), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); // So even a noop will look like alice // doesn't have the balance to pay the fee @@ -2032,11 +2201,11 @@ class TxQ1_test : public beast::unit_test::suite fee(drops(51)), seq(aliceSeq + 2), ter(terINSUF_FEE_B)); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 3, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 3, limit, 256); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2048,7 +2217,7 @@ class TxQ1_test : public beast::unit_test::suite ////////////////////////////////////////// // Offer with high XRP out and super high fee blocks later txs fillQueue(env, alice); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2057,7 +2226,7 @@ class TxQ1_test : public beast::unit_test::suite // Alice creates an offer with a fee larger than the reserve // This one can queue because it's the first in the queue for alice env(offer(alice, BUX(5000), XRP(50000)), fee(drops(300)), queued); - checkMetrics(env, 1, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit, 256); // So even a noop will look like alice // doesn't have the balance to pay the fee @@ -2065,11 +2234,11 @@ class TxQ1_test : public beast::unit_test::suite fee(drops(51)), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(env, 1, limit * 2, limit + 1, limit, 256); + 
checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2081,7 +2250,7 @@ class TxQ1_test : public beast::unit_test::suite ////////////////////////////////////////// // Offer with low XRP out allows later txs fillQueue(env, alice); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2091,11 +2260,11 @@ class TxQ1_test : public beast::unit_test::suite // And later transactions are just fine env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2107,7 +2276,7 @@ class TxQ1_test : public beast::unit_test::suite ////////////////////////////////////////// // Large XRP payment doesn't block later txs fillQueue(env, alice); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2120,11 +2289,11 @@ class TxQ1_test : public beast::unit_test::suite // But because the reserve is protected, another // transaction will be allowed to queue env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // 
But once we close the ledger, we find alice // still has most of her balance, because the @@ -2134,7 +2303,7 @@ class TxQ1_test : public beast::unit_test::suite ////////////////////////////////////////// // Small XRP payment allows later txs fillQueue(env, alice); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2145,11 +2314,11 @@ class TxQ1_test : public beast::unit_test::suite // And later transactions are just fine env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // The payment succeeds env.require( @@ -2160,19 +2329,19 @@ class TxQ1_test : public beast::unit_test::suite auto const amount = USD(500000); env(trust(alice, USD(50000000))); env(trust(charlie, USD(50000000))); - checkMetrics(env, 0, limit * 2, 4, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 4, limit, 256); // Close so we don't have to deal // with tx ordering in consensus. env.close(); env(pay(gw, alice, amount)); - checkMetrics(env, 0, limit * 2, 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 1, limit, 256); // Close so we don't have to deal // with tx ordering in consensus. env.close(); fillQueue(env, alice); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2185,11 +2354,11 @@ class TxQ1_test : public beast::unit_test::suite // But that's fine, because it doesn't affect // alice's XRP balance (other than the fee, of course). 
env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // So once we close the ledger, alice has her // XRP balance, but her USD balance went to charlie. @@ -2209,7 +2378,7 @@ class TxQ1_test : public beast::unit_test::suite env.close(); fillQueue(env, charlie); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2225,11 +2394,11 @@ class TxQ1_test : public beast::unit_test::suite // But because the reserve is protected, another // transaction will be allowed to queue env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // So once we close the ledger, alice sent a payment // to charlie using only a portion of her XRP balance @@ -2244,7 +2413,7 @@ class TxQ1_test : public beast::unit_test::suite // Small XRP to IOU payment allows later txs. 
fillQueue(env, charlie); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2259,11 +2428,11 @@ class TxQ1_test : public beast::unit_test::suite // And later transactions are just fine env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(env, 2, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 2, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 2, limit, 256); // So once we close the ledger, alice sent a payment // to charlie using only a portion of her XRP balance @@ -2280,7 +2449,7 @@ class TxQ1_test : public beast::unit_test::suite env.close(); fillQueue(env, charlie); - checkMetrics(env, 0, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit, 256); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2290,11 +2459,11 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), seq(aliceSeq + 1), ter(terINSUF_FEE_B)); BEAST_EXPECT(env.balance(alice) == drops(30)); - checkMetrics(env, 1, limit * 2, limit + 1, limit, 256); + checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit, 256); env.close(); ++limit; - checkMetrics(env, 0, limit * 2, 1, limit, 256); + checkMetrics(__LINE__, env, 0, limit * 2, 1, limit, 256); BEAST_EXPECT(env.balance(alice) == drops(5)); } @@ -2380,27 +2549,27 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); // Fund accounts while the fee is cheap so they all apply. 
env.fund(XRP(50000), noripple(alice, bob, charlie)); - checkMetrics(env, 0, std::nullopt, 3, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 3, 3, 256); // Alice - no fee change yet env(noop(alice)); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); // Bob with really high fee - applies env(noop(bob), openLedgerFee(env)); - checkMetrics(env, 0, std::nullopt, 5, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 5, 3, 256); // Charlie with low fee: queued env(noop(charlie), fee(1000), queued); - checkMetrics(env, 1, std::nullopt, 5, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3, 256); env.close(); // Verify that the queued transaction was applied - checkMetrics(env, 0, 10, 1, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 1, 5, 256); ///////////////////////////////////////////////////////////////// @@ -2411,7 +2580,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(bob), fee(1000)); env(noop(bob), fee(1000)); env(noop(bob), fee(1000)); - checkMetrics(env, 0, 10, 6, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 6, 5, 256); // Use explicit fees so we can control which txn // will get dropped @@ -2435,7 +2604,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), fee(21), seq(aliceSeq++), queued); // Queue is full now. - checkMetrics(env, 10, 10, 6, 5, 385); + checkMetrics(__LINE__, env, 10, 10, 6, 5, 385); // Try to add another transaction with the default (low) fee, // it should fail because the queue is full. 
@@ -2554,7 +2723,7 @@ class TxQ1_test : public beast::unit_test::suite auto const bob = Account("bob"); env.fund(XRP(500000), noripple(alice, bob)); - checkMetrics(env, 0, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1, 256); auto const aliceSeq = env.seq(alice); BEAST_EXPECT(env.current()->info().seq == 3); @@ -2574,7 +2743,7 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq + 3), json(R"({"LastLedgerSequence":11})"), ter(terQUEUED)); - checkMetrics(env, 4, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 4, std::nullopt, 2, 1, 256); auto const bobSeq = env.seq(bob); // Ledger 4 gets 3, // Ledger 5 gets 4, @@ -2583,17 +2752,17 @@ class TxQ1_test : public beast::unit_test::suite { env(noop(bob), seq(bobSeq + i), fee(200), ter(terQUEUED)); } - checkMetrics(env, 4 + 3 + 4 + 5, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 4 + 3 + 4 + 5, std::nullopt, 2, 1, 256); // Close ledger 3 env.close(); - checkMetrics(env, 4 + 4 + 5, 20, 3, 2, 256); + checkMetrics(__LINE__, env, 4 + 4 + 5, 20, 3, 2, 256); // Close ledger 4 env.close(); - checkMetrics(env, 4 + 5, 30, 4, 3, 256); + checkMetrics(__LINE__, env, 4 + 5, 30, 4, 3, 256); // Close ledger 5 env.close(); // Alice's first two txs expired. - checkMetrics(env, 2, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 2, 40, 5, 4, 256); // Because aliceSeq is missing, aliceSeq + 1 fails env(noop(alice), seq(aliceSeq + 1), ter(terPRE_SEQ)); @@ -2602,27 +2771,27 @@ class TxQ1_test : public beast::unit_test::suite env(fset(alice, asfAccountTxnID), seq(aliceSeq), ter(telCAN_NOT_QUEUE_BLOCKS)); - checkMetrics(env, 2, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 2, 40, 5, 4, 256); // However we can fill the gap with a non-blocker. env(noop(alice), seq(aliceSeq), fee(20), ter(terQUEUED)); - checkMetrics(env, 3, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 3, 40, 5, 4, 256); // Attempt to queue up a new aliceSeq + 1 tx that's a blocker. 
env(fset(alice, asfAccountTxnID), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BLOCKS)); - checkMetrics(env, 3, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 3, 40, 5, 4, 256); // Queue up a non-blocker replacement for aliceSeq + 1. env(noop(alice), seq(aliceSeq + 1), fee(20), ter(terQUEUED)); - checkMetrics(env, 4, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 4, 40, 5, 4, 256); // Close ledger 6 env.close(); // We expect that all of alice's queued tx's got into // the open ledger. - checkMetrics(env, 0, 50, 4, 5, 256); + checkMetrics(__LINE__, env, 0, 50, 4, 5, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 4); } @@ -2648,7 +2817,7 @@ class TxQ1_test : public beast::unit_test::suite auto const bob = Account("bob"); env.fund(XRP(500000), noripple(alice, bob)); - checkMetrics(env, 0, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1, 256); auto const aliceSeq = env.seq(alice); BEAST_EXPECT(env.current()->info().seq == 3); @@ -2695,7 +2864,7 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq + 19), json(R"({"LastLedgerSequence":11})"), ter(terQUEUED)); - checkMetrics(env, 10, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 10, std::nullopt, 2, 1, 256); auto const bobSeq = env.seq(bob); // Ledger 4 gets 2 from bob and 1 from alice, @@ -2705,21 +2874,21 @@ class TxQ1_test : public beast::unit_test::suite { env(noop(bob), seq(bobSeq + i), fee(200), ter(terQUEUED)); } - checkMetrics(env, 10 + 2 + 4 + 5, std::nullopt, 2, 1, 256); + checkMetrics(__LINE__, env, 10 + 2 + 4 + 5, std::nullopt, 2, 1, 256); // Close ledger 3 env.close(); - checkMetrics(env, 9 + 4 + 5, 20, 3, 2, 256); + checkMetrics(__LINE__, env, 9 + 4 + 5, 20, 3, 2, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 12); // Close ledger 4 env.close(); - checkMetrics(env, 9 + 5, 30, 4, 3, 256); + checkMetrics(__LINE__, env, 9 + 5, 30, 4, 3, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 12); // Close ledger 5 env.close(); // Three of Alice's txs expired. 
- checkMetrics(env, 6, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 6, 40, 5, 4, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 12); // Top off Alice's queue again using Tickets so the sequence gap is @@ -2730,7 +2899,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), ticket::use(aliceSeq + 4), ter(terQUEUED)); env(noop(alice), ticket::use(aliceSeq + 5), ter(terQUEUED)); env(noop(alice), ticket::use(aliceSeq + 6), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 11, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 11, 40, 5, 4, 256); // Even though alice's queue is full we can still slide in a couple // more transactions because she has a sequence gap. But we @@ -2761,7 +2930,7 @@ class TxQ1_test : public beast::unit_test::suite // Finally we can fill in the entire gap. env(noop(alice), seq(aliceSeq + 18), ter(terQUEUED)); - checkMetrics(env, 14, 40, 5, 4, 256); + checkMetrics(__LINE__, env, 14, 40, 5, 4, 256); // Verify that nothing can be added now that the gap is filled. env(noop(alice), seq(aliceSeq + 20), ter(telCAN_NOT_QUEUE_FULL)); @@ -2770,18 +2939,18 @@ class TxQ1_test : public beast::unit_test::suite // but alice adds one more transaction at seq(aliceSeq + 20) so // we only see a reduction by 5. env.close(); - checkMetrics(env, 9, 50, 6, 5, 256); + checkMetrics(__LINE__, env, 9, 50, 6, 5, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 15); // Close ledger 7. That should remove 7 more of alice's transactions. 
env.close(); - checkMetrics(env, 2, 60, 7, 6, 256); + checkMetrics(__LINE__, env, 2, 60, 7, 6, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 19); // Close one last ledger to see all of alice's transactions moved // into the ledger, including the tickets env.close(); - checkMetrics(env, 0, 70, 2, 7, 256); + checkMetrics(__LINE__, env, 0, 70, 2, 7, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 21); } @@ -2799,7 +2968,7 @@ class TxQ1_test : public beast::unit_test::suite env.fund(XRP(100000), alice, bob); fillQueue(env, alice); - checkMetrics(env, 0, std::nullopt, 7, 6, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 7, 6, 256); // Queue up several transactions for alice sign-and-submit auto const aliceSeq = env.seq(alice); @@ -2819,9 +2988,9 @@ class TxQ1_test : public beast::unit_test::suite envs(noop(alice), fee(1000), seq(none), ter(terQUEUED))( submitParams); } - checkMetrics(env, 5, std::nullopt, 7, 6, 256); + checkMetrics(__LINE__, env, 5, std::nullopt, 7, 6, 256); { - auto aliceStat = txQ.getAccountTxs(alice.id(), *env.current()); + auto aliceStat = txQ.getAccountTxs(alice.id()); SeqProxy seq = SeqProxy::sequence(aliceSeq); BEAST_EXPECT(aliceStat.size() == 5); for (auto const& tx : aliceStat) @@ -2844,34 +3013,34 @@ class TxQ1_test : public beast::unit_test::suite // Give them a higher fee so they'll beat alice's. for (int i = 0; i < 8; ++i) envs(noop(bob), fee(2000), seq(none), ter(terQUEUED))(); - checkMetrics(env, 13, std::nullopt, 7, 6, 256); + checkMetrics(__LINE__, env, 13, std::nullopt, 7, 6, 256); env.close(); - checkMetrics(env, 5, 14, 8, 7, 256); + checkMetrics(__LINE__, env, 5, 14, 8, 7, 256); // Put some more txs in the queue for bob. // Give them a higher fee so they'll beat alice's. fillQueue(env, bob); for (int i = 0; i < 9; ++i) envs(noop(bob), fee(2000), seq(none), ter(terQUEUED))(); - checkMetrics(env, 14, 14, 8, 7, 25601); + checkMetrics(__LINE__, env, 14, 14, 8, 7, 25601); env.close(); // Put some more txs in the queue for bob. 
// Give them a higher fee so they'll beat alice's. fillQueue(env, bob); for (int i = 0; i < 10; ++i) envs(noop(bob), fee(2000), seq(none), ter(terQUEUED))(); - checkMetrics(env, 15, 16, 9, 8, 256); + checkMetrics(__LINE__, env, 15, 16, 9, 8, 256); env.close(); - checkMetrics(env, 4, 18, 10, 9, 256); + checkMetrics(__LINE__, env, 4, 18, 10, 9, 256); { // Bob has nothing left in the queue. - auto bobStat = txQ.getAccountTxs(bob.id(), *env.current()); + auto bobStat = txQ.getAccountTxs(bob.id()); BEAST_EXPECT(bobStat.empty()); } // Verify alice's tx got dropped as we BEAST_EXPECT, and that there's // a gap in her queued txs. { - auto aliceStat = txQ.getAccountTxs(alice.id(), *env.current()); + auto aliceStat = txQ.getAccountTxs(alice.id()); auto seq = aliceSeq; BEAST_EXPECT(aliceStat.size() == 4); for (auto const& tx : aliceStat) @@ -2888,9 +3057,9 @@ class TxQ1_test : public beast::unit_test::suite } // Now, fill the gap. envs(noop(alice), fee(1000), seq(none), ter(terQUEUED))(submitParams); - checkMetrics(env, 5, 18, 10, 9, 256); + checkMetrics(__LINE__, env, 5, 18, 10, 9, 256); { - auto aliceStat = txQ.getAccountTxs(alice.id(), *env.current()); + auto aliceStat = txQ.getAccountTxs(alice.id()); auto seq = aliceSeq; BEAST_EXPECT(aliceStat.size() == 5); for (auto const& tx : aliceStat) @@ -2903,14 +3072,14 @@ class TxQ1_test : public beast::unit_test::suite } env.close(); - checkMetrics(env, 0, 20, 5, 10, 256); + checkMetrics(__LINE__, env, 0, 20, 5, 10, 256); { // Bob's data has been cleaned up. 
- auto bobStat = txQ.getAccountTxs(bob.id(), *env.current()); + auto bobStat = txQ.getAccountTxs(bob.id()); BEAST_EXPECT(bobStat.empty()); } { - auto aliceStat = txQ.getAccountTxs(alice.id(), *env.current()); + auto aliceStat = txQ.getAccountTxs(alice.id()); BEAST_EXPECT(aliceStat.empty()); } } @@ -2961,10 +3130,10 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(!queue_data.isMember(jss::max_spend_drops_total)); BEAST_EXPECT(!queue_data.isMember(jss::transactions)); } - checkMetrics(env, 0, 6, 0, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 0, 3, 256); fillQueue(env, alice); - checkMetrics(env, 0, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 4, 3, 256); { auto const info = env.rpc("json", "account_info", withQueue); @@ -2989,7 +3158,7 @@ class TxQ1_test : public beast::unit_test::suite envs(noop(alice), fee(100), seq(none), ter(terQUEUED))(submitParams); envs(noop(alice), fee(100), seq(none), ter(terQUEUED))(submitParams); envs(noop(alice), fee(100), seq(none), ter(terQUEUED))(submitParams); - checkMetrics(env, 4, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 4, 6, 4, 3, 256); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3036,7 +3205,7 @@ class TxQ1_test : public beast::unit_test::suite // Drain the queue so we can queue up a blocker. env.close(); - checkMetrics(env, 0, 8, 4, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 4, 4, 256); // Fill the ledger and then queue up a blocker. 
envs(noop(alice), seq(none))(submitParams); @@ -3047,7 +3216,7 @@ class TxQ1_test : public beast::unit_test::suite seq(none), json(jss::LastLedgerSequence, 10), ter(terQUEUED))(submitParams); - checkMetrics(env, 1, 8, 5, 4, 256); + checkMetrics(__LINE__, env, 1, 8, 5, 4, 256); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3103,7 +3272,7 @@ class TxQ1_test : public beast::unit_test::suite envs(noop(alice), fee(100), seq(none), ter(telCAN_NOT_QUEUE_BLOCKED))( submitParams); - checkMetrics(env, 1, 8, 5, 4, 256); + checkMetrics(__LINE__, env, 1, 8, 5, 4, 256); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3171,9 +3340,9 @@ class TxQ1_test : public beast::unit_test::suite } env.close(); - checkMetrics(env, 0, 10, 2, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 2, 5, 256); env.close(); - checkMetrics(env, 0, 10, 0, 5, 256); + checkMetrics(__LINE__, env, 0, 10, 0, 5, 256); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3242,17 +3411,17 @@ class TxQ1_test : public beast::unit_test::suite state[jss::load_factor_fee_reference] == 256); } - checkMetrics(env, 0, 6, 0, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 0, 3, 256); fillQueue(env, alice); - checkMetrics(env, 0, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 0, 6, 4, 3, 256); auto aliceSeq = env.seq(alice); auto submitParams = Json::Value(Json::objectValue); for (auto i = 0; i < 4; ++i) envs(noop(alice), fee(100), seq(aliceSeq + i), ter(terQUEUED))( submitParams); - checkMetrics(env, 4, 6, 4, 3, 256); + checkMetrics(__LINE__, env, 4, 6, 4, 3, 256); { auto const server_info = env.rpc("server_info"); @@ -3477,7 +3646,7 @@ class TxQ1_test : public beast::unit_test::suite // Fund the first few accounts at non escalated fee env.fund(XRP(50000), noripple(a, b, c, d)); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); // First transaction establishes the messaging using namespace std::chrono_literals; @@ 
-3527,7 +3696,7 @@ class TxQ1_test : public beast::unit_test::suite jv[jss::load_factor_fee_reference] == 256; })); - checkMetrics(env, 0, 8, 0, 4, 256); + checkMetrics(__LINE__, env, 0, 8, 0, 4, 256); // Fund then next few accounts at non escalated fee env.fund(XRP(50000), noripple(e, f, g, h, i)); @@ -3541,7 +3710,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(e), fee(10), queued); env(noop(f), fee(10), queued); env(noop(g), fee(10), queued); - checkMetrics(env, 7, 8, 5, 4, 256); + checkMetrics(__LINE__, env, 7, 8, 5, 4, 256); // Last transaction escalates the fee BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { @@ -3610,7 +3779,7 @@ class TxQ1_test : public beast::unit_test::suite auto alice = Account("alice"); auto bob = Account("bob"); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); env.fund(XRP(50000000), alice, bob); fillQueue(env, alice); @@ -3656,7 +3825,7 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(env, 3, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 3, std::nullopt, 4, 3, 256); // Figure out how much it would cost to cover all the // queued txs + itself @@ -3669,7 +3838,7 @@ class TxQ1_test : public beast::unit_test::suite // the edge case test. env(noop(alice), fee(totalFee1), seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(env, 4, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 4, std::nullopt, 4, 3, 256); // Now repeat the process including the new tx // and avoiding the rounding error @@ -3679,7 +3848,7 @@ class TxQ1_test : public beast::unit_test::suite // Submit a transaction with that fee. It will succeed. 
env(noop(alice), fee(totalFee2), seq(aliceSeq++)); - checkMetrics(env, 0, std::nullopt, 9, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 9, 3, 256); } testcase("replace last tx with enough to clear queue"); @@ -3699,7 +3868,7 @@ class TxQ1_test : public beast::unit_test::suite seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(env, 3, std::nullopt, 9, 3, 256); + checkMetrics(__LINE__, env, 3, std::nullopt, 9, 3, 256); // Figure out how much it would cost to cover all the // queued txs + itself @@ -3712,10 +3881,10 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), fee(totalFee), seq(aliceSeq++)); // The queue is clear - checkMetrics(env, 0, std::nullopt, 12, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 12, 3, 256); env.close(); - checkMetrics(env, 0, 24, 0, 12, 256); + checkMetrics(__LINE__, env, 0, 24, 0, 12, 256); } testcase("replace middle tx with enough to clear queue"); @@ -3728,7 +3897,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), fee(100), seq(aliceSeq++), ter(terQUEUED)); } - checkMetrics(env, 5, 24, 13, 12, 256); + checkMetrics(__LINE__, env, 5, 24, 13, 12, 256); // Figure out how much it would cost to cover 3 txns std::uint64_t const totalFee = calcTotalFee(100 * 2, 3); @@ -3737,9 +3906,9 @@ class TxQ1_test : public beast::unit_test::suite aliceSeq -= 3; env(noop(alice), fee(totalFee), seq(aliceSeq++)); - checkMetrics(env, 2, 24, 16, 12, 256); + checkMetrics(__LINE__, env, 2, 24, 16, 12, 256); auto const aliceQueue = - env.app().getTxQ().getAccountTxs(alice.id(), *env.current()); + env.app().getTxQ().getAccountTxs(alice.id()); BEAST_EXPECT(aliceQueue.size() == 2); SeqProxy seq = SeqProxy::sequence(aliceSeq); for (auto const& tx : aliceQueue) @@ -3751,7 +3920,7 @@ class TxQ1_test : public beast::unit_test::suite // Close the ledger to clear the queue env.close(); - checkMetrics(env, 0, 32, 2, 16, 256); + checkMetrics(__LINE__, env, 0, 32, 2, 16, 256); } testcase("clear queue failure (load)"); @@ 
-3768,7 +3937,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), fee(22), seq(aliceSeq++), ter(terQUEUED)); } - checkMetrics(env, 4, 32, 17, 16, 256); + checkMetrics(__LINE__, env, 4, 32, 17, 16, 256); // Figure out how much it would cost to cover all the txns // + 1 @@ -3783,11 +3952,11 @@ class TxQ1_test : public beast::unit_test::suite env(noop(alice), fee(totalFee), seq(aliceSeq++), ter(terQUEUED)); // The original last transaction is still in the queue - checkMetrics(env, 5, 32, 17, 16, 256); + checkMetrics(__LINE__, env, 5, 32, 17, 16, 256); // With high load, some of the txs stay in the queue env.close(); - checkMetrics(env, 3, 34, 2, 17, 256); + checkMetrics(__LINE__, env, 3, 34, 2, 17, 256); // Load drops back down feeTrack.setRemoteFee(origFee); @@ -3795,14 +3964,14 @@ class TxQ1_test : public beast::unit_test::suite // Because of the earlier failure, alice can not clear the queue, // no matter how high the fee fillQueue(env, bob); - checkMetrics(env, 3, 34, 18, 17, 256); + checkMetrics(__LINE__, env, 3, 34, 18, 17, 256); env(noop(alice), fee(XRP(1)), seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(env, 4, 34, 18, 17, 256); + checkMetrics(__LINE__, env, 4, 34, 18, 17, 256); // With normal load, those txs get into the ledger env.close(); - checkMetrics(env, 0, 36, 4, 18, 256); + checkMetrics(__LINE__, env, 0, 36, 4, 18, 256); } } @@ -3824,77 +3993,77 @@ class TxQ1_test : public beast::unit_test::suite {"maximum_txn_per_account", "200"}})); auto alice = Account("alice"); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); env.fund(XRP(50000000), alice); fillQueue(env, alice); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); auto seqAlice = env.seq(alice); auto txCount = 140; for (int i = 0; i < txCount; ++i) env(noop(alice), seq(seqAlice++), ter(terQUEUED)); - checkMetrics(env, txCount, std::nullopt, 4, 3, 256); + 
checkMetrics(__LINE__, env, txCount, std::nullopt, 4, 3, 256); // Close a few ledgers successfully, so the limit grows env.close(); // 4 + 25% = 5 txCount -= 6; - checkMetrics(env, txCount, 10, 6, 5, 257); + checkMetrics(__LINE__, env, txCount, 10, 6, 5, 257); env.close(); // 6 + 25% = 7 txCount -= 8; - checkMetrics(env, txCount, 14, 8, 7, 257); + checkMetrics(__LINE__, env, txCount, 14, 8, 7, 257); env.close(); // 8 + 25% = 10 txCount -= 11; - checkMetrics(env, txCount, 20, 11, 10, 257); + checkMetrics(__LINE__, env, txCount, 20, 11, 10, 257); env.close(); // 11 + 25% = 13 txCount -= 14; - checkMetrics(env, txCount, 26, 14, 13, 257); + checkMetrics(__LINE__, env, txCount, 26, 14, 13, 257); env.close(); // 14 + 25% = 17 txCount -= 18; - checkMetrics(env, txCount, 34, 18, 17, 257); + checkMetrics(__LINE__, env, txCount, 34, 18, 17, 257); env.close(); // 18 + 25% = 22 txCount -= 23; - checkMetrics(env, txCount, 44, 23, 22, 257); + checkMetrics(__LINE__, env, txCount, 44, 23, 22, 257); env.close(); // 23 + 25% = 28 txCount -= 29; - checkMetrics(env, txCount, 56, 29, 28, 256); + checkMetrics(__LINE__, env, txCount, 56, 29, 28, 256); // From 3 expected to 28 in 7 "fast" ledgers. // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 15; - checkMetrics(env, txCount, 56, 15, 14, 256); + checkMetrics(__LINE__, env, txCount, 56, 15, 14, 256); // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 8; - checkMetrics(env, txCount, 56, 8, 7, 256); + checkMetrics(__LINE__, env, txCount, 56, 8, 7, 256); // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 4; - checkMetrics(env, txCount, 56, 4, 3, 256); + checkMetrics(__LINE__, env, txCount, 56, 4, 3, 256); // From 28 expected back down to 3 in 3 "slow" ledgers. 
// Confirm the minimum sticks env.close(env.now() + 5s, 10000ms); txCount -= 4; - checkMetrics(env, txCount, 56, 4, 3, 256); + checkMetrics(__LINE__, env, txCount, 56, 4, 3, 256); BEAST_EXPECT(!txCount); } @@ -3910,35 +4079,35 @@ class TxQ1_test : public beast::unit_test::suite {"maximum_txn_per_account", "200"}})); auto alice = Account("alice"); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); env.fund(XRP(50000000), alice); fillQueue(env, alice); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); auto seqAlice = env.seq(alice); auto txCount = 43; for (int i = 0; i < txCount; ++i) env(noop(alice), seq(seqAlice++), ter(terQUEUED)); - checkMetrics(env, txCount, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, txCount, std::nullopt, 4, 3, 256); // Close a few ledgers successfully, so the limit grows env.close(); // 4 + 150% = 10 txCount -= 11; - checkMetrics(env, txCount, 20, 11, 10, 257); + checkMetrics(__LINE__, env, txCount, 20, 11, 10, 257); env.close(); // 11 + 150% = 27 txCount -= 28; - checkMetrics(env, txCount, 54, 28, 27, 256); + checkMetrics(__LINE__, env, txCount, 54, 28, 27, 256); // From 3 expected to 28 in 7 "fast" ledgers. // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 4; - checkMetrics(env, txCount, 54, 4, 3, 256); + checkMetrics(__LINE__, env, txCount, 54, 4, 3, 256); // From 28 expected back down to 3 in 3 "slow" ledgers. 
@@ -3968,19 +4137,19 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); // Create account env.fund(XRP(50000), noripple(alice)); - checkMetrics(env, 0, std::nullopt, 1, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 1, 3, 256); fillQueue(env, alice); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); // Queue a transaction auto const aliceSeq = env.seq(alice); env(noop(alice), queued); - checkMetrics(env, 1, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3, 256); // Now, apply a (different) transaction directly // to the open ledger, bypassing the queue @@ -3996,23 +4165,23 @@ class TxQ1_test : public beast::unit_test::suite return result.second; }); // the queued transaction is still there - checkMetrics(env, 1, std::nullopt, 5, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3, 256); // The next transaction should be able to go into the open // ledger, even though aliceSeq is queued. In earlier incarnations // of the TxQ this would cause an assert. env(noop(alice), seq(aliceSeq + 1), openLedgerFee(env)); - checkMetrics(env, 1, std::nullopt, 6, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 6, 3, 256); // Now queue a couple more transactions to make sure // they succeed despite aliceSeq being queued env(noop(alice), seq(aliceSeq + 2), queued); env(noop(alice), seq(aliceSeq + 3), queued); - checkMetrics(env, 3, std::nullopt, 6, 3, 256); + checkMetrics(__LINE__, env, 3, std::nullopt, 6, 3, 256); // Now close the ledger. One of the queued transactions // (aliceSeq) should be dropped. 
env.close(); - checkMetrics(env, 0, 12, 2, 6, 256); + checkMetrics(__LINE__, env, 0, 12, 2, 6, 256); } void @@ -4035,11 +4204,11 @@ class TxQ1_test : public beast::unit_test::suite BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(env, 0, std::nullopt, 0, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); // Create account env.fund(XRP(50000), noripple(alice)); - checkMetrics(env, 0, std::nullopt, 1, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 1, 3, 256); // Create tickets std::uint32_t const tktSeq0{env.seq(alice) + 1}; @@ -4047,12 +4216,12 @@ class TxQ1_test : public beast::unit_test::suite // Fill the queue so the next transaction will be queued. fillQueue(env, alice); - checkMetrics(env, 0, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3, 256); // Queue a transaction with a ticket. Leave an unused ticket // on either side. env(noop(alice), ticket::use(tktSeq0 + 1), queued); - checkMetrics(env, 1, std::nullopt, 4, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3, 256); // Now, apply a (different) transaction directly // to the open ledger, bypassing the queue @@ -4068,25 +4237,25 @@ class TxQ1_test : public beast::unit_test::suite return result.second; }); // the queued transaction is still there - checkMetrics(env, 1, std::nullopt, 5, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3, 256); // The next (sequence-based) transaction should be able to go into // the open ledger, even though tktSeq0 is queued. Note that this // sequence-based transaction goes in front of the queued // transaction, so the queued transaction is left in the queue. env(noop(alice), openLedgerFee(env)); - checkMetrics(env, 1, std::nullopt, 6, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 6, 3, 256); // We should be able to do the same thing with a ticket that goes // if front of the queued transaction. This one too will leave // the queued transaction in place. 
env(noop(alice), ticket::use(tktSeq0 + 0), openLedgerFee(env)); - checkMetrics(env, 1, std::nullopt, 7, 3, 256); + checkMetrics(__LINE__, env, 1, std::nullopt, 7, 3, 256); // We have one ticketed transaction in the queue. We should able // to add another to the queue. env(noop(alice), ticket::use(tktSeq0 + 2), queued); - checkMetrics(env, 2, std::nullopt, 7, 3, 256); + checkMetrics(__LINE__, env, 2, std::nullopt, 7, 3, 256); // Here we try to force the queued transactions into the ledger by // adding one more queued (ticketed) transaction that pays enough @@ -4102,12 +4271,12 @@ class TxQ1_test : public beast::unit_test::suite // transaction is equally capable of going into the ledger independent // of all other ticket- or sequence-based transactions. env(noop(alice), ticket::use(tktSeq0 + 3), fee(XRP(1))); - checkMetrics(env, 2, std::nullopt, 8, 3, 256); + checkMetrics(__LINE__, env, 2, std::nullopt, 8, 3, 256); // Now close the ledger. One of the queued transactions // (the one with tktSeq0 + 1) should be dropped. env.close(); - checkMetrics(env, 0, 16, 1, 8, 256); + checkMetrics(__LINE__, env, 0, 16, 1, 8, 256); } void @@ -4150,7 +4319,7 @@ class TxQ1_test : public beast::unit_test::suite env.close(); env.fund(XRP(10000), fiona); env.close(); - checkMetrics(env, 0, 10, 0, 2, 256); + checkMetrics(__LINE__, env, 0, 10, 0, 2, 256); // Close ledgers until the amendments show up. int i = 0; @@ -4161,7 +4330,8 @@ class TxQ1_test : public beast::unit_test::suite break; } auto expectedPerLedger = ripple::detail::numUpVotedAmendments() + 1; - checkMetrics(env, 0, 5 * expectedPerLedger, 0, expectedPerLedger, 256); + checkMetrics( + __LINE__, env, 0, 5 * expectedPerLedger, 0, expectedPerLedger, 256); // Now wait 2 weeks modulo 256 ledgers for the amendments to be // enabled. Speed the process by closing ledgers every 80 minutes, @@ -4174,6 +4344,7 @@ class TxQ1_test : public beast::unit_test::suite // We're very close to the flag ledger. Fill the ledger. 
fillQueue(env, alice); checkMetrics( + __LINE__, env, 0, 5 * expectedPerLedger, @@ -4201,6 +4372,7 @@ class TxQ1_test : public beast::unit_test::suite } std::size_t expectedInQueue = 60; checkMetrics( + __LINE__, env, expectedInQueue, 5 * expectedPerLedger, @@ -4228,6 +4400,7 @@ class TxQ1_test : public beast::unit_test::suite expectedInLedger -= expectedInQueue; ++expectedPerLedger; checkMetrics( + __LINE__, env, expectedInQueue, 5 * expectedPerLedger, @@ -4314,7 +4487,7 @@ class TxQ1_test : public beast::unit_test::suite // of their transactions expire out of the queue. To start out // alice fills the ledger. fillQueue(env, alice); - checkMetrics(env, 0, 50, 7, 6, 256); + checkMetrics(__LINE__, env, 0, 50, 7, 6, 256); // Now put a few transactions into alice's queue, including one that // will expire out soon. @@ -4360,9 +4533,9 @@ class TxQ1_test : public beast::unit_test::suite env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } - checkMetrics(env, 34, 50, 7, 6, 256); + checkMetrics(__LINE__, env, 34, 50, 7, 6, 256); env.close(); - checkMetrics(env, 26, 50, 8, 7, 256); + checkMetrics(__LINE__, env, 26, 50, 8, 7, 256); // Re-fill the queue so alice and bob stay stuck. feeDrops = medFee; @@ -4373,9 +4546,9 @@ class TxQ1_test : public beast::unit_test::suite env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } - checkMetrics(env, 38, 50, 8, 7, 256); + checkMetrics(__LINE__, env, 38, 50, 8, 7, 256); env.close(); - checkMetrics(env, 29, 50, 9, 8, 256); + checkMetrics(__LINE__, env, 29, 50, 9, 8, 256); // One more time... 
feeDrops = medFee; @@ -4386,9 +4559,9 @@ class TxQ1_test : public beast::unit_test::suite env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } - checkMetrics(env, 41, 50, 9, 8, 256); + checkMetrics(__LINE__, env, 41, 50, 9, 8, 256); env.close(); - checkMetrics(env, 29, 50, 10, 9, 256); + checkMetrics(__LINE__, env, 29, 50, 10, 9, 256); // Finally the stage is set. alice's and bob's transactions expired // out of the queue which caused the dropPenalty flag to be set on @@ -4410,7 +4583,7 @@ class TxQ1_test : public beast::unit_test::suite env(noop(carol), seq(seqCarol++), fee(--feeDrops), ter(terQUEUED)); env(noop(daria), seq(seqDaria++), fee(--feeDrops), ter(terQUEUED)); env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); - checkMetrics(env, 48, 50, 10, 9, 256); + checkMetrics(__LINE__, env, 48, 50, 10, 9, 256); // Now induce a fee jump which should cause all the transactions // in the queue to fail with telINSUF_FEE_P. @@ -4427,7 +4600,7 @@ class TxQ1_test : public beast::unit_test::suite // o The _last_ transaction should be dropped from alice's queue. // o The first failing transaction should be dropped from bob's queue. env.close(); - checkMetrics(env, 46, 50, 0, 10, 256); + checkMetrics(__LINE__, env, 46, 50, 0, 10, 256); // Run the local fee back down. while (env.app().getFeeTrack().lowerLocalFee()) @@ -4435,7 +4608,7 @@ class TxQ1_test : public beast::unit_test::suite // bob fills the ledger so it's easier to probe the TxQ. fillQueue(env, bob); - checkMetrics(env, 46, 50, 11, 10, 256); + checkMetrics(__LINE__, env, 46, 50, 11, 10, 256); // Before the close() alice had two transactions in her queue. // We now expect her to have one. Here's the state of alice's queue. 
@@ -4547,7 +4720,7 @@ class TxQ1_test : public beast::unit_test::suite env.close(); - checkMetrics(env, 0, 50, 4, 6, 256); + checkMetrics(__LINE__, env, 0, 50, 4, 6, 256); } { @@ -4608,7 +4781,7 @@ class TxQ1_test : public beast::unit_test::suite // The ticket transactions that didn't succeed or get queued succeed // this time because the tickets got consumed when the offers came // out of the queue - checkMetrics(env, 0, 50, 8, 7, 256); + checkMetrics(__LINE__, env, 0, 50, 8, 7, 256); } } diff --git a/src/test/basics/join_test.cpp b/src/test/basics/join_test.cpp new file mode 100644 index 00000000000..730fcb69343 --- /dev/null +++ b/src/test/basics/join_test.cpp @@ -0,0 +1,105 @@ +//------------------------------------------------------------------------------ +/* +This file is part of rippled: https://github.com/ripple/rippled +Copyright (c) 2022 Ripple Labs Inc. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +#include +#include + +namespace ripple { +namespace test { + +struct join_test : beast::unit_test::suite +{ + void + run() override + { + auto test = [this](auto collectionanddelimiter, std::string expected) { + std::stringstream ss; + // Put something else in the buffer before and after to ensure that + // the << operator returns the stream correctly. + ss << "(" << collectionanddelimiter << ")"; + auto const str = ss.str(); + BEAST_EXPECT(str.substr(1, str.length() - 2) == expected); + BEAST_EXPECT(str.front() == '('); + BEAST_EXPECT(str.back() == ')'); + }; + + // C++ array + test( + CollectionAndDelimiter(std::array{2, -1, 5, 10}, "/"), + "2/-1/5/10"); + // One item C++ array edge case + test( + CollectionAndDelimiter(std::array{"test"}, " & "), + "test"); + // Empty C++ array edge case + test(CollectionAndDelimiter(std::array{}, ","), ""); + { + // C-style array + char letters[4]{'w', 'a', 's', 'd'}; + test(CollectionAndDelimiter(letters, std::to_string(0)), "w0a0s0d"); + } + { + // Auto sized C-style array + std::string words[]{"one", "two", "three", "four"}; + test(CollectionAndDelimiter(words, "\n"), "one\ntwo\nthree\nfour"); + } + { + // One item C-style array edge case + std::string words[]{"thing"}; + test(CollectionAndDelimiter(words, "\n"), "thing"); + } + // Initializer list + test( + CollectionAndDelimiter(std::initializer_list{19, 25}, "+"), + "19+25"); + // vector + test( + CollectionAndDelimiter(std::vector{0, 42}, std::to_string(99)), + "09942"); + { + // vector with one item edge case + using namespace jtx; + test( + CollectionAndDelimiter( + std::vector{Account::master}, "xxx"), + Account::master.human()); + } + // empty vector edge case + test(CollectionAndDelimiter(std::vector{}, ","), ""); + // C-style string + test(CollectionAndDelimiter("string", " "), "s t r i n g"); + // Empty C-style string edge case + test(CollectionAndDelimiter("", 
"*"), ""); + // Single char C-style string edge case + test(CollectionAndDelimiter("x", "*"), "x"); + // std::string + test(CollectionAndDelimiter(std::string{"string"}, "-"), "s-t-r-i-n-g"); + // Empty std::string edge case + test(CollectionAndDelimiter(std::string{""}, "*"), ""); + // Single char std::string edge case + test(CollectionAndDelimiter(std::string{"y"}, "*"), "y"); + } +}; // namespace test + +BEAST_DEFINE_TESTSUITE(join, ripple_basics, ripple); + +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/impl/paths.cpp b/src/test/jtx/impl/paths.cpp index e27d2789b87..1b9bf52fc29 100644 --- a/src/test/jtx/impl/paths.cpp +++ b/src/test/jtx/impl/paths.cpp @@ -33,7 +33,8 @@ paths::operator()(Env& env, JTx& jt) const auto const to = env.lookup(jv[jss::Destination].asString()); auto const amount = amountFromJson(sfAmount, jv[jss::Amount]); Pathfinder pf( - std::make_shared(env.current()), + std::make_shared( + env.current(), env.app().journal("RippleLineCache")), from, to, in_.currency, diff --git a/src/test/protocol/Hooks_test.cpp b/src/test/protocol/Hooks_test.cpp new file mode 100644 index 00000000000..161404195a4 --- /dev/null +++ b/src/test/protocol/Hooks_test.cpp @@ -0,0 +1,197 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012-2017 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace ripple { + +class Hooks_test : public beast::unit_test::suite +{ + /** + * This unit test was requested here: + * https://github.com/ripple/rippled/pull/4089#issuecomment-1050274539 + * These are tests that exercise facilities that are reserved for when Hooks + * is merged in the future. + **/ + + void + testHookFields() + { + testcase("Test Hooks fields"); + + using namespace test::jtx; + + std::vector> fields_to_test = { + sfHookResult, + sfHookStateChangeCount, + sfHookEmitCount, + sfHookExecutionIndex, + sfHookApiVersion, + sfHookStateCount, + sfEmitGeneration, + sfHookOn, + sfHookInstructionCount, + sfEmitBurden, + sfHookReturnCode, + sfReferenceCount, + sfEmitParentTxnID, + sfEmitNonce, + sfEmitHookHash, + sfHookStateKey, + sfHookHash, + sfHookNamespace, + sfHookSetTxnID, + sfHookStateData, + sfHookReturnString, + sfHookParameterName, + sfHookParameterValue, + sfEmitCallback, + sfHookAccount, + sfEmittedTxn, + sfHook, + sfHookDefinition, + sfHookParameter, + sfHookGrant, + sfEmitDetails, + sfHookExecutions, + sfHookExecution, + sfHookParameters, + sfHooks, + sfHookGrants}; + + for (auto const& rf : fields_to_test) + { + SField const& f = rf.get(); + + STObject dummy{sfGeneric}; + + BEAST_EXPECT(!dummy.isFieldPresent(f)); + + switch (f.fieldType) + { + case STI_UINT8: { + dummy.setFieldU8(f, 0); + BEAST_EXPECT(dummy.getFieldU8(f) == 0); + + dummy.setFieldU8(f, 255); + BEAST_EXPECT(dummy.getFieldU8(f) == 255); + + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_UINT16: { 
+ dummy.setFieldU16(f, 0); + BEAST_EXPECT(dummy.getFieldU16(f) == 0); + + dummy.setFieldU16(f, 0xFFFFU); + BEAST_EXPECT(dummy.getFieldU16(f) == 0xFFFFU); + + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_UINT32: { + dummy.setFieldU32(f, 0); + BEAST_EXPECT(dummy.getFieldU32(f) == 0); + + dummy.setFieldU32(f, 0xFFFFFFFFU); + BEAST_EXPECT(dummy.getFieldU32(f) == 0xFFFFFFFFU); + + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_UINT64: { + dummy.setFieldU64(f, 0); + BEAST_EXPECT(dummy.getFieldU64(f) == 0); + + dummy.setFieldU64(f, 0xFFFFFFFFFFFFFFFFU); + BEAST_EXPECT(dummy.getFieldU64(f) == 0xFFFFFFFFFFFFFFFFU); + + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_HASH256: { + uint256 u = uint256::fromVoid( + "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBE" + "EFDEADBEEF"); + dummy.setFieldH256(f, u); + BEAST_EXPECT(dummy.getFieldH256(f) == u); + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_VL: { + std::vector v{1, 2, 3}; + dummy.setFieldVL(f, v); + BEAST_EXPECT(dummy.getFieldVL(f) == v); + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_ACCOUNT: { + AccountID id = *parseBase58( + "rwfSjJNK2YQuN64bSWn7T2eY9FJAyAPYJT"); + dummy.setAccountID(f, id); + BEAST_EXPECT(dummy.getAccountID(f) == id); + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_OBJECT: { + dummy.emplace_back(STObject{f}); + BEAST_EXPECT(dummy.getField(f).getFName() == f); + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + case STI_ARRAY: { + STArray dummy2{f, 2}; + dummy2.push_back(STObject{sfGeneric}); + dummy2.push_back(STObject{sfGeneric}); + dummy.setFieldArray(f, dummy2); + BEAST_EXPECT(dummy.getFieldArray(f) == dummy2); + BEAST_EXPECT(dummy.isFieldPresent(f)); + break; + } + + default: + BEAST_EXPECT(false); + } + } + } + +public: + void + run() override + { + using namespace test::jtx; + testHookFields(); + } +}; + +BEAST_DEFINE_TESTSUITE(Hooks, protocol, ripple); + +} // 
namespace ripple diff --git a/src/test/rpc/GetCounts_test.cpp b/src/test/rpc/GetCounts_test.cpp index a3b0b716239..52b645ed717 100644 --- a/src/test/rpc/GetCounts_test.cpp +++ b/src/test/rpc/GetCounts_test.cpp @@ -35,6 +35,9 @@ class GetCounts_test : public beast::unit_test::suite Json::Value result; { + using namespace std::chrono_literals; + // Add a little delay so the App's "uptime" will have a value. + std::this_thread::sleep_for(1s); // check counts with no transactions posted result = env.rpc("get_counts")[jss::result]; BEAST_EXPECT(result[jss::status] == "success"); diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index df8bebfacee..fdcefbf66c2 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -1539,10 +1539,11 @@ class LedgerRPC_test : public beast::unit_test::suite jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; const std::string txid1 = [&]() { + auto const& parentHash = env.current()->info().parentHash; if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) { - const std::string txid0 = [&]() { - auto const& txj = jrr[jss::queue_data][0u]; + const std::string txid1 = [&]() { + auto const& txj = jrr[jss::queue_data][1u]; BEAST_EXPECT(txj[jss::account] == alice.human()); BEAST_EXPECT(txj[jss::fee_level] == "256"); BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); @@ -1554,7 +1555,7 @@ class LedgerRPC_test : public beast::unit_test::suite return tx[jss::hash].asString(); }(); - auto const& txj = jrr[jss::queue_data][1u]; + auto const& txj = jrr[jss::queue_data][0u]; BEAST_EXPECT(txj[jss::account] == alice.human()); BEAST_EXPECT(txj[jss::fee_level] == "256"); BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); @@ -1563,9 +1564,12 @@ class LedgerRPC_test : public beast::unit_test::suite auto const& tx = txj[jss::tx]; BEAST_EXPECT(tx[jss::Account] == alice.human()); BEAST_EXPECT(tx[jss::TransactionType] == jss::OfferCreate); - const auto txid1 = tx[jss::hash].asString(); - 
BEAST_EXPECT(txid0 < txid1); - return txid1; + const auto txid0 = tx[jss::hash].asString(); + uint256 tx0, tx1; + BEAST_EXPECT(tx0.parseHex(txid0)); + BEAST_EXPECT(tx1.parseHex(txid1)); + BEAST_EXPECT((tx0 ^ parentHash) < (tx1 ^ parentHash)); + return txid0; } return std::string{}; }(); @@ -1577,6 +1581,7 @@ class LedgerRPC_test : public beast::unit_test::suite jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) { + auto const& parentHash = env.current()->info().parentHash; auto const txid0 = [&]() { auto const& txj = jrr[jss::queue_data][0u]; BEAST_EXPECT(txj[jss::account] == alice.human()); @@ -1593,7 +1598,10 @@ class LedgerRPC_test : public beast::unit_test::suite BEAST_EXPECT(txj["last_result"] == "terPRE_SEQ"); BEAST_EXPECT(txj.isMember(jss::tx)); BEAST_EXPECT(txj[jss::tx] == txid1); - BEAST_EXPECT(txid0 < txid1); + uint256 tx0, tx1; + BEAST_EXPECT(tx0.parseHex(txid0)); + BEAST_EXPECT(tx1.parseHex(txid1)); + BEAST_EXPECT((tx0 ^ parentHash) < (tx1 ^ parentHash)); } env.close(); diff --git a/src/test/rpc/LedgerRequestRPC_test.cpp b/src/test/rpc/LedgerRequestRPC_test.cpp index 0896dadd514..de2ddeff8e4 100644 --- a/src/test/rpc/LedgerRequestRPC_test.cpp +++ b/src/test/rpc/LedgerRequestRPC_test.cpp @@ -318,9 +318,11 @@ class LedgerRequestRPC_test : public beast::unit_test::suite { using namespace test::jtx; using namespace std::chrono_literals; - Env env{*this}; + Env env{*this, envconfig([](std::unique_ptr cfg) { + cfg->NODE_SIZE = 0; + return cfg; + })}; Account const gw{"gateway"}; - env.app().getLedgerMaster().tune(0, 1h); auto const USD = gw["USD"]; env.fund(XRP(100000), gw); diff --git a/src/test/shamap/SHAMapSync_test.cpp b/src/test/shamap/SHAMapSync_test.cpp index f262f5f8bff..ba32f6e80dc 100644 --- a/src/test/shamap/SHAMapSync_test.cpp +++ b/src/test/shamap/SHAMapSync_test.cpp @@ -124,24 +124,18 @@ class SHAMapSync_test : public beast::unit_test::suite destination.setSynching(); { - std::vector 
gotNodeIDs_a; - std::vector gotNodes_a; + std::vector> a; BEAST_EXPECT(source.getNodeFat( - SHAMapNodeID(), - gotNodeIDs_a, - gotNodes_a, - rand_bool(eng_), - rand_int(eng_, 2))); - - unexpected(gotNodes_a.size() < 1, "NodeSize"); - - BEAST_EXPECT(destination - .addRootNode( - source.getHash(), - makeSlice(*gotNodes_a.begin()), - nullptr) - .isGood()); + SHAMapNodeID(), a, rand_bool(eng_), rand_int(eng_, 2))); + + unexpected(a.size() < 1, "NodeSize"); + + BEAST_EXPECT( + destination + .addRootNode( + source.getHash(), makeSlice(a[0].second), nullptr) + .isGood()); } do @@ -155,8 +149,7 @@ class SHAMapSync_test : public beast::unit_test::suite break; // get as many nodes as possible based on this information - std::vector gotNodeIDs_b; - std::vector gotNodes_b; + std::vector> b; for (auto& it : nodesMissing) { @@ -164,29 +157,24 @@ class SHAMapSync_test : public beast::unit_test::suite // non-deterministic number of times and the number of tests run // should be deterministic if (!source.getNodeFat( - it.first, - gotNodeIDs_b, - gotNodes_b, - rand_bool(eng_), - rand_int(eng_, 2))) + it.first, b, rand_bool(eng_), rand_int(eng_, 2))) fail("", __FILE__, __LINE__); } // Don't use BEAST_EXPECT here b/c it will be called a // non-deterministic number of times and the number of tests run // should be deterministic - if (gotNodeIDs_b.size() != gotNodes_b.size() || - gotNodeIDs_b.empty()) + if (b.empty()) fail("", __FILE__, __LINE__); - for (std::size_t i = 0; i < gotNodeIDs_b.size(); ++i) + for (std::size_t i = 0; i < b.size(); ++i) { // Don't use BEAST_EXPECT here b/c it will be called a // non-deterministic number of times and the number of tests run // should be deterministic if (!destination .addKnownNode( - gotNodeIDs_b[i], makeSlice(gotNodes_b[i]), nullptr) + b[i].first, makeSlice(b[i].second), nullptr) .isUseful()) fail("", __FILE__, __LINE__); }