mirror of https://github.com/ByConity/ByConity
commit 6886d8c2b6
@@ -26,10 +26,7 @@
url = https://github.com/google/double-conversion.git
[submodule "contrib/re2"]
path = contrib/re2
url = https://github.com/google/re2
[submodule "contrib/llvm"]
path = contrib/llvm
url = https://github.com/ClickHouse-Extras/llvm
url = https://github.com/ByConity/re2.git
[submodule "contrib/mariadb-connector-c"]
path = contrib/mariadb-connector-c
url = https://github.com/ClickHouse-Extras/mariadb-connector-c.git
@@ -58,12 +55,6 @@
[submodule "contrib/libgsasl"]
path = contrib/libgsasl
url = https://github.com/ClickHouse-Extras/libgsasl.git
[submodule "contrib/libcxx"]
path = contrib/libcxx
url = https://github.com/ClickHouse-Extras/libcxx.git
[submodule "contrib/libcxxabi"]
path = contrib/libcxxabi
url = https://github.com/ByConity/clickhouse-libcxxabi.git
[submodule "contrib/snappy"]
path = contrib/snappy
url = https://github.com/google/snappy
@@ -273,7 +264,7 @@
url = https://github.com/ByConity/libhdfs3-open.git
[submodule "contrib/boost"]
path = contrib/boost
url = https://github.com/ClickHouse-Extras/boost.git
url = https://github.com/ByConity/boost.git
[submodule "contrib/breakpad"]
path = contrib/breakpad
url = https://github.com/ByConity/breakpad.git
@@ -361,3 +352,6 @@
[submodule "contrib/java-extensions"]
path = contrib/java-extensions
url = https://github.com/ByConity/clickhouse-java-extensions.git
[submodule "contrib/llvm-project"]
path = contrib/llvm-project
url = https://github.com/ByConity/ClickHouse-llvm-project.git
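After switching these submodule URLs, the checkouts are refreshed with git submodule update --init --recursive, the same command the cmake warnings further down suggest when a submodule is missing.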
@@ -285,38 +285,30 @@ endif ()
# Enable it explicitly.
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")

if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
# CMake < 3.12 doesn't support setting 20 as a C++ standard version.
# We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now.

if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> works with GCC
set (_CXX_STANDARD "gnu++2a")
else ()
set (_CXX_STANDARD "c++2a")
endif ()

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
else ()
set (CMAKE_CXX_STANDARD 20)
set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
set (CMAKE_CXX_STANDARD_REQUIRED ON)
endif ()
set (CMAKE_CXX_STANDARD 23)
set (CMAKE_CXX_EXTENSIONS OFF)
set (CMAKE_CXX_STANDARD_REQUIRED ON)

set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)

if (COMPILER_GCC OR COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
endif ()
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")

# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
# benchmarks.
if (COMPILER_GCC OR COMPILER_CLANG)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
endif ()
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")

if (ARCH_AMD64)
# align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()

# Disable floating-point expression contraction in order to get consistent floating point calculation results across platforms
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffp-contract=off")

# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
@@ -538,7 +530,6 @@ include (cmake/find/rdkafka.cmake)
include (cmake/find/libuv.cmake) # for amqpcpp and cassandra
include (cmake/find/amqpcpp.cmake)
include (cmake/find/capnp.cmake)
include (cmake/find/llvm.cmake)
include (cmake/find/h3.cmake)
include (cmake/find/libxml2.cmake)
include (cmake/find/brotli.cmake)
@@ -54,6 +54,11 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
endif()
elseif (CCACHE_VERSION VERSION_EQUAL "unknown")
set (CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_CXX_COMPILER_LAUNCHER})
set (CMAKE_C_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_C_COMPILER_LAUNCHER})

set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
else ()
message(${RECONFIGURE_MESSAGE_LEVEL} "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
endif ()
@@ -1,71 +1,8 @@
option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++" ${NOT_UNBUNDLED})

if (NOT USE_LIBCXX)
if (USE_INTERNAL_LIBCXX_LIBRARY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use internal libcxx with USE_LIBCXX=OFF")
endif()

target_link_libraries(global-libs INTERFACE -l:libstdc++.a -l:libstdc++fs.a) # Always link these libraries as static
target_link_libraries(global-libs INTERFACE ${EXCEPTION_HANDLING_LIBRARY})
return()
endif()

set(USE_INTERNAL_LIBCXX_LIBRARY_DEFAULT ${NOT_UNBUNDLED})

option (USE_INTERNAL_LIBCXX_LIBRARY "Disable to use system libcxx and libcxxabi libraries instead of bundled"
${USE_INTERNAL_LIBCXX_LIBRARY_DEFAULT})

if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxx/CMakeLists.txt")
if (USE_INTERNAL_LIBCXX_LIBRARY)
message(WARNING "submodule contrib/libcxx is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libcxx")
set(USE_INTERNAL_LIBCXX_LIBRARY 0)
endif()
set(USE_INTERNAL_LIBCXX_LIBRARY_DEFAULT 0)
set(MISSING_INTERNAL_LIBCXX_LIBRARY 1)
endif()

set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build.

if (NOT USE_INTERNAL_LIBCXX_LIBRARY)
find_library (LIBCXX_LIBRARY c++)
find_library (LIBCXXFS_LIBRARY c++fs)
find_library (LIBCXXABI_LIBRARY c++abi)
add_subdirectory(contrib/libcxxabi-cmake)
add_subdirectory(contrib/libcxx-cmake)

if(LIBCXX_LIBRARY AND LIBCXXABI_LIBRARY) # c++fs is now a part of the libc++
set (HAVE_LIBCXX 1)
else ()
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system libcxx")
endif()
# Exception handling library is embedded into libcxxabi.

if(NOT LIBCXXFS_LIBRARY)
set(LIBCXXFS_LIBRARY ${LIBCXX_LIBRARY})
endif()

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")

target_link_libraries(global-libs INTERFACE ${EXCEPTION_HANDLING_LIBRARY})
endif ()

if (NOT HAVE_LIBCXX AND NOT MISSING_INTERNAL_LIBCXX_LIBRARY)
set (LIBCXX_LIBRARY cxx)
set (LIBCXXABI_LIBRARY cxxabi)
add_subdirectory(contrib/libcxxabi-cmake)
add_subdirectory(contrib/libcxx-cmake)

# Exception handling library is embedded into libcxxabi.

set (HAVE_LIBCXX 1)
set(USE_INTERNAL_LIBCXX_LIBRARY 1)
endif ()

if (HAVE_LIBCXX)
target_link_libraries(global-libs INTERFACE ${LIBCXX_LIBRARY} ${LIBCXXABI_LIBRARY} ${LIBCXXFS_LIBRARY})

message (STATUS "Using libcxx: ${LIBCXX_LIBRARY}")
message (STATUS "Using libcxxfs: ${LIBCXXFS_LIBRARY}")
message (STATUS "Using libcxxabi: ${LIBCXXABI_LIBRARY}")
else()
target_link_libraries(global-libs INTERFACE -l:libstdc++.a -l:libstdc++fs.a) # Always link these libraries as static
target_link_libraries(global-libs INTERFACE ${EXCEPTION_HANDLING_LIBRARY})
endif()
target_link_libraries(global-libs INTERFACE cxx cxxabi)
@@ -1,77 +0,0 @@
if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
endif()

option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON)

if (NOT ENABLE_EMBEDDED_COMPILER)
set (USE_EMBEDDED_COMPILER 0)
return()
endif()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt")
message (${RECONFIGURE_MESSAGE_LEVEL} "submodule /contrib/llvm is missing. to fix try run: \n git submodule update --init --recursive")
endif ()

set (USE_EMBEDDED_COMPILER 1)

set (LLVM_FOUND 1)
set (LLVM_VERSION "12.0.0bundled")
set (LLVM_INCLUDE_DIRS
"${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include"
"${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include"
)
set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm")

message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}")
message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}")
message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}")

# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
set (REQUIRED_LLVM_LIBRARIES
LLVMExecutionEngine
LLVMRuntimeDyld
LLVMX86CodeGen
LLVMX86Desc
LLVMX86Info
LLVMAsmPrinter
LLVMDebugInfoDWARF
LLVMGlobalISel
LLVMSelectionDAG
LLVMMCDisassembler
LLVMPasses
LLVMCodeGen
LLVMipo
LLVMBitWriter
LLVMInstrumentation
LLVMScalarOpts
LLVMAggressiveInstCombine
LLVMInstCombine
LLVMVectorize
LLVMTransformUtils
LLVMTarget
LLVMAnalysis
LLVMProfileData
LLVMObject
LLVMBitReader
LLVMCore
LLVMRemarks
LLVMBitstreamReader
LLVMMCParser
LLVMMC
LLVMBinaryFormat
LLVMDebugInfoCodeView
LLVMSupport
LLVMDemangle
)

#function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
# llvm_map_components_to_libnames (result all)
# if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)
# list (REMOVE_ITEM result "LTO" "LLVM")
# else()
# set (result "LLVM")
# endif ()
# list (APPEND result ${CMAKE_DL_LIBS} ${ZLIB_LIBRARIES})
# set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE)
#endfunction()
@@ -109,6 +109,8 @@ if (COMPILER_CLANG)
no_warning(enum-constexpr-conversion)
no_warning(documentation-html)
no_warning(documentation)
no_warning(unsafe-buffer-usage) # too aggressive
no_warning(thread-safety-negative) # experimental flag, too many false positives

# XXX: libstdc++ has some of these for 3way compare
if (NOT USE_LIBCXX)
@@ -256,27 +256,7 @@ elseif(GTEST_SRC_DIR)
target_compile_definitions(gmock INTERFACE)
endif()

if (USE_EMBEDDED_COMPILER)
# ld: unknown option: --color-diagnostics
if (APPLE)
set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
endif ()

set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")

# Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
# LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
set (CMAKE_CXX_STANDARD 17)

add_subdirectory (llvm/llvm)

set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
unset (CMAKE_CXX_STANDARD_bak)
endif ()
add_subdirectory (llvm-project-cmake)

if (USE_INTERNAL_LIBGSASL_LIBRARY)
add_subdirectory(libgsasl)
@@ -1 +1 @@
Subproject commit c0807e83f2824e8dd67a15b355496a9b784cdcd5
Subproject commit 208e9c7a2ce928a239d9121c3498108b78f7403c
@@ -1,3 +1,4 @@
set (CMAKE_CXX_STANDARD 11)
option (USE_INTERNAL_BOOST_LIBRARY "Use internal Boost library" ${NOT_UNBUNDLED})

if (NOT USE_INTERNAL_BOOST_LIBRARY)
@@ -86,8 +87,11 @@ if (NOT EXTERNAL_BOOST_FOUND)
target_include_directories (_boost_headers_only SYSTEM BEFORE INTERFACE ${LIBRARY_DIR})

# asio

target_compile_definitions (_boost_headers_only INTERFACE BOOST_ASIO_STANDALONE=1)
target_compile_definitions (_boost_headers_only INTERFACE
BOOST_ASIO_STANDALONE=1
BOOST_ASIO_HAS_STD_INVOKE_RESULT=1 # Avoid using of deprecated in c++ > 17 std::result_of
BOOST_TIMER_ENABLE_DEPRECATED=1 # wordnet-blast (enabled via USE_NLP) uses Boost legacy timer classes
)

# iostreams
@@ -203,9 +207,9 @@ if (NOT EXTERNAL_BOOST_FOUND)
# coroutine

set (SRCS_COROUTINE
"${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp"
"${LIBRARY_DIR}/libs/coroutine/exceptions.cpp"
"${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp"
"${LIBRARY_DIR}/libs/coroutine/src/detail/coroutine_context.cpp"
"${LIBRARY_DIR}/libs/coroutine/src/exceptions.cpp"
"${LIBRARY_DIR}/libs/coroutine/src/posix/stack_traits.cpp"
)
add_library (_boost_coroutine ${SRCS_COROUTINE})
add_library (boost::coroutine ALIAS _boost_coroutine)
@@ -1,3 +1,4 @@
set (CMAKE_CXX_STANDARD 20)
set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc")
set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc")
@@ -1,3 +1,4 @@
set (CMAKE_CXX_STANDARD 17)
if (HAVE_SSSE3)
option (ENABLE_HYPERSCAN "Enable hyperscan library" ${ENABLE_LIBRARIES})
elseif(ENABLE_HYPERSCAN)
@@ -1 +0,0 @@
Subproject commit 2fa892f69acbaa40f8a18c6484854a6183a34482
@@ -1,6 +1,6 @@
include(CheckCXXCompilerFlag)

set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxx")
set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxx")

set(SRCS
"${LIBCXX_SOURCE_DIR}/src/algorithm.cpp"
@@ -18,6 +18,7 @@ set(SRCS
"${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp"
"${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp"
"${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp"
"${LIBCXX_SOURCE_DIR}/src/format.cpp"
"${LIBCXX_SOURCE_DIR}/src/functional.cpp"
"${LIBCXX_SOURCE_DIR}/src/future.cpp"
"${LIBCXX_SOURCE_DIR}/src/hash.cpp"
@@ -44,11 +45,14 @@ set(SRCS
"${LIBCXX_SOURCE_DIR}/src/valarray.cpp"
"${LIBCXX_SOURCE_DIR}/src/variant.cpp"
"${LIBCXX_SOURCE_DIR}/src/vector.cpp"
"${LIBCXX_SOURCE_DIR}/src/verbose_abort.cpp"
)

add_library(cxx ${SRCS})
set_target_properties(cxx PROPERTIES FOLDER "contrib/libcxx-cmake")

target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_include_directories(cxx SYSTEM BEFORE PRIVATE $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/src>)
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>>)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)

# Enable capturing stack traces for all exceptions.
@@ -56,16 +60,14 @@ if (USE_UNWIND)
target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
endif ()

# Override the deduced attribute support that causes error.
if (OS_DARWIN AND COMPILER_GCC)
add_compile_definitions(_LIBCPP_INIT_PRIORITY_MAX)
endif ()

target_compile_options(cxx PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++>)

# Third party library may have substandard code.
target_compile_options(cxx PRIVATE -w)

# Enable support for Clang-Thread-Safety-Analysis in libcxx
target_compile_definitions(cxx PUBLIC -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS)

target_link_libraries(cxx PUBLIC cxxabi)

install(
@@ -1 +0,0 @@
Subproject commit 618c4d26a3d6720d4e49b5e8f3aef100c3a40890
@@ -1,4 +1,4 @@
set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi")
set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxxabi")

set(SRCS
"${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp"
@@ -22,6 +22,7 @@ set(SRCS
)

add_library(cxxabi ${SRCS})
set_target_properties(cxxabi PROPERTIES FOLDER "contrib/libcxxabi-cmake")

# Third party library may have substandard code.
target_compile_options(cxxabi PRIVATE -w)
@@ -29,6 +30,7 @@ target_compile_options(cxxabi PRIVATE -w)
target_include_directories(cxxabi SYSTEM BEFORE
PUBLIC $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/include>
PRIVATE $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/../libcxx/include>
PRIVATE $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/../libcxx/src>
)
target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
@@ -1 +0,0 @@
Subproject commit e5751459412bce1391fb7a2e9bbc01e131bf72f1
@@ -0,0 +1 @@
Subproject commit c1133c0f659d55b24fcefa3da0fd4961778b91ce
@@ -0,0 +1,122 @@
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()

option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

option (ENABLE_BLAKE3 "Enable BLAKE3 function" ${ENABLE_LIBRARIES})
# If USE_STATIC_LIBRARIES=0 was passed to CMake, we'll still build LLVM statically to keep complexity minimal.

if (NOT ENABLE_EMBEDDED_COMPILER)
message(STATUS "Not using LLVM")
return()
endif()

# TODO: Enable compilation on AArch64

set (LLVM_VERSION "15.0.0bundled")
set (LLVM_INCLUDE_DIRS
"${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm/include"
"${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm/include"
)
set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
# NOTE: You should not remove this line since otherwise it will use default 20,
# and llvm cannot be compiled with bundled libcxx and 20 standard.
set (CMAKE_CXX_STANDARD 17)

# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
set (REQUIRED_LLVM_LIBRARIES
LLVMExecutionEngine
LLVMRuntimeDyld
LLVMAsmPrinter
LLVMDebugInfoDWARF
LLVMGlobalISel
LLVMSelectionDAG
LLVMMCDisassembler
LLVMPasses
LLVMCodeGen
LLVMipo
LLVMBitWriter
LLVMInstrumentation
LLVMScalarOpts
LLVMAggressiveInstCombine
LLVMInstCombine
LLVMVectorize
LLVMTransformUtils
LLVMTarget
LLVMAnalysis
LLVMProfileData
LLVMObject
LLVMBitReader
LLVMCore
LLVMRemarks
LLVMBitstreamReader
LLVMMCParser
LLVMMC
LLVMBinaryFormat
LLVMDebugInfoCodeView
LLVMSupport
LLVMDemangle
)

# if (ARCH_AMD64)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
# elseif (ARCH_AARCH64)
# list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
# endif ()

set (CMAKE_INSTALL_RPATH "ON") # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
set (LLVM_COMPILER_CHECKED 1 CACHE INTERNAL "") # Skip internal compiler selection
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "") # With exception handling
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86" CACHE STRING "") # for x86 + ARM: "X86;AArch64"

# Omit unnecessary stuff (just the options which are ON by default)
set(LLVM_ENABLE_BACKTRACES 0 CACHE INTERNAL "")
set(LLVM_ENABLE_CRASH_OVERRIDES 0 CACHE INTERNAL "")
set(LLVM_ENABLE_TERMINFO 0 CACHE INTERNAL "")
set(LLVM_ENABLE_LIBXML2 0 CACHE INTERNAL "")
set(LLVM_ENABLE_LIBEDIT 0 CACHE INTERNAL "")
set(LLVM_ENABLE_LIBPFM 0 CACHE INTERNAL "")
set(LLVM_ENABLE_ZLIB 0 CACHE INTERNAL "")
set(LLVM_ENABLE_ZSTD 0 CACHE INTERNAL "")
set(LLVM_ENABLE_Z3_SOLVER 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_TOOLS 0 CACHE INTERNAL "")
set(LLVM_BUILD_TOOLS 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_UTILS 0 CACHE INTERNAL "")
set(LLVM_BUILD_UTILS 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_RUNTIMES 0 CACHE INTERNAL "")
set(LLVM_BUILD_RUNTIMES 0 CACHE INTERNAL "")
set(LLVM_BUILD_RUNTIME 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_EXAMPLES 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_TESTS 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_GO_TESTS 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_BENCHMARKS 0 CACHE INTERNAL "")
set(LLVM_INCLUDE_DOCS 0 CACHE INTERNAL "")
set(LLVM_ENABLE_OCAMLDOC 0 CACHE INTERNAL "")
set(LLVM_ENABLE_BINDINGS 0 CACHE INTERNAL "")

set (LLVM_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm")
set (LLVM_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")

set_directory_properties (PROPERTIES
# due to llvm crosscompile cmake does not know how to clean it, and on clean
# will lead to the following error:
#
# ninja: error: remove(contrib/llvm/llvm/NATIVE): Directory not empty
#
ADDITIONAL_CLEAN_FILES "${LLVM_BINARY_DIR}"
# llvm's cmake configuring this file only when cmake runs,
# and after clean cmake will not know that it should re-run,
# add explicitly depends from llvm-config.h
CMAKE_CONFIGURE_DEPENDS "${LLVM_BINARY_DIR}/include/llvm/Config/llvm-config.h"
)

add_library (_llvm INTERFACE)
target_link_libraries (_llvm INTERFACE ${REQUIRED_LLVM_LIBRARIES})
target_include_directories (_llvm SYSTEM BEFORE INTERFACE ${LLVM_INCLUDE_DIRS})
add_library(ch_contrib::llvm ALIAS _llvm)
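A minimal consumer sketch for the interface target defined above; the target name example_jit_user is hypothetical, only the ch_contrib::llvm alias comes from this hunk:

# Hypothetical example target; guard on the alias added by the hunk above.
if (TARGET ch_contrib::llvm)
    # Pulls in the bundled LLVM libraries and their include directories
    # through the _llvm interface target.
    target_link_libraries (example_jit_user PRIVATE ch_contrib::llvm)
endif ()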
@@ -1,3 +1,4 @@
set (CMAKE_CXX_STANDARD 17)
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/poco")

add_subdirectory (Crypto)
@@ -1 +1 @@
Subproject commit 07c77549a20b63ff6981b400085eba36bb5c80c4
Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24
@@ -12,19 +12,6 @@ option(WITH_LZ4 "build with lz4" ON)
option(WITH_ZLIB "build with zlib" ON)
option(WITH_ZSTD "build with zstd" ON)

# third-party/folly is only validated to work on Linux and Windows for now.
# So only turn it on there by default.
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
if(MSVC AND MSVC_VERSION LESS 1910)
# Folly does not compile with MSVC older than VS2017
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
else()
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
endif()
else()
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
endif()

if( NOT DEFINED CMAKE_CXX_STANDARD )
set(CMAKE_CXX_STANDARD 11)
endif()
@@ -70,10 +57,6 @@ else()
endif()
endif()

set(BUILD_VERSION_CC rocksdb_build_version.cc)
add_library(rocksdb_build_version OBJECT ${BUILD_VERSION_CC})

target_include_directories(rocksdb_build_version PRIVATE "${ROCKSDB_SOURCE_DIR}/util")

include(CheckCCompilerFlag)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
@@ -235,280 +218,348 @@ endif()

include_directories(${ROCKSDB_SOURCE_DIR})
include_directories("${ROCKSDB_SOURCE_DIR}/include")
if(WITH_FOLLY_DISTRIBUTED_MUTEX)
include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
endif()
find_package(Threads REQUIRED)

# Main library source code

set(SOURCES
"${ROCKSDB_SOURCE_DIR}/cache/cache.cc"
"${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc"
"${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc"
"${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc"
"${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc"
"${ROCKSDB_SOURCE_DIR}/db/builder.cc"
"${ROCKSDB_SOURCE_DIR}/db/c.cc"
"${ROCKSDB_SOURCE_DIR}/db/column_family.cc"
"${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc"
"${ROCKSDB_SOURCE_DIR}/db/convenience.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_iter.cc"
"${ROCKSDB_SOURCE_DIR}/db/dbformat.cc"
"${ROCKSDB_SOURCE_DIR}/db/error_handler.cc"
"${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc"
"${ROCKSDB_SOURCE_DIR}/db/experimental.cc"
"${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc"
"${ROCKSDB_SOURCE_DIR}/db/flush_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc"
"${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc"
"${ROCKSDB_SOURCE_DIR}/db/log_reader.cc"
"${ROCKSDB_SOURCE_DIR}/db/log_writer.cc"
"${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc"
"${ROCKSDB_SOURCE_DIR}/db/memtable.cc"
"${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc"
"${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc"
"${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc"
"${ROCKSDB_SOURCE_DIR}/db/output_validator.cc"
"${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc"
"${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc"
"${ROCKSDB_SOURCE_DIR}/db/repair.cc"
"${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/table_cache.cc"
"${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc"
"${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_builder.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_edit.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_set.cc"
"${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc"
"${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_batch.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_controller.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_thread.cc"
"${ROCKSDB_SOURCE_DIR}/env/env.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc"
"${ROCKSDB_SOURCE_DIR}/env/file_system.cc"
"${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc"
"${ROCKSDB_SOURCE_DIR}/env/mock_env.cc"
"${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc"
"${ROCKSDB_SOURCE_DIR}/file/file_util.cc"
"${ROCKSDB_SOURCE_DIR}/file/filename.cc"
"${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc"
"${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc"
"${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc"
"${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc"
"${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc"
"${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc"
"${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc"
"${ROCKSDB_SOURCE_DIR}/memory/arena.cc"
"${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc"
"${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc"
"${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc"
"${ROCKSDB_SOURCE_DIR}/options/cf_options.cc"
"${ROCKSDB_SOURCE_DIR}/options/configurable.cc"
"${ROCKSDB_SOURCE_DIR}/options/customizable.cc"
"${ROCKSDB_SOURCE_DIR}/options/db_options.cc"
"${ROCKSDB_SOURCE_DIR}/options/options.cc"
"${ROCKSDB_SOURCE_DIR}/options/options_helper.cc"
"${ROCKSDB_SOURCE_DIR}/options/options_parser.cc"
"${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc"
"${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc"
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/format.cc"
"${ROCKSDB_SOURCE_DIR}/table/get_context.cc"
"${ROCKSDB_SOURCE_DIR}/table/iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc"
"${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc"
"${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc"
"${ROCKSDB_SOURCE_DIR}/table/table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/table_properties.cc"
"${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc"
"${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc"
"${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc"
"${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc"
"${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc"
"${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc"
"${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc"
"${ROCKSDB_SOURCE_DIR}/util/coding.cc"
"${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc"
"${ROCKSDB_SOURCE_DIR}/util/comparator.cc"
"${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc"
"${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc"
"${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
"${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc"
"${ROCKSDB_SOURCE_DIR}/util/hash.cc"
"${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc"
"${ROCKSDB_SOURCE_DIR}/util/random.cc"
"${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc"
"${ROCKSDB_SOURCE_DIR}/util/slice.cc"
"${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc"
"${ROCKSDB_SOURCE_DIR}/util/status.cc"
"${ROCKSDB_SOURCE_DIR}/util/string_util.cc"
"${ROCKSDB_SOURCE_DIR}/util/thread_local.cc"
"${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc"
"${ROCKSDB_SOURCE_DIR}/util/xxhash.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/debug.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc"
$<TARGET_OBJECTS:rocksdb_build_version>)
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_helpers.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
${ROCKSDB_SOURCE_DIR}/cache/charged_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/secondary_cache_adapter.cc
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_garbage_meter.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_source.cc
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
${ROCKSDB_SOURCE_DIR}/db/builder.cc
${ROCKSDB_SOURCE_DIR}/db/c.cc
${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_service_job.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_state.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_outputs.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/subcompaction_state.cc
${ROCKSDB_SOURCE_DIR}/db/convenience.cc
${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc
${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc
${ROCKSDB_SOURCE_DIR}/db/db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/dbformat.cc
${ROCKSDB_SOURCE_DIR}/db/error_handler.cc
${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc
${ROCKSDB_SOURCE_DIR}/db/experimental.cc
${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc
${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc
${ROCKSDB_SOURCE_DIR}/db/flush_job.cc
${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc
${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc
${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc
${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc
${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc
${ROCKSDB_SOURCE_DIR}/db/log_reader.cc
${ROCKSDB_SOURCE_DIR}/db/log_writer.cc
${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc
${ROCKSDB_SOURCE_DIR}/db/memtable.cc
${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc
${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
${ROCKSDB_SOURCE_DIR}/db/periodic_task_scheduler.cc
${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
${ROCKSDB_SOURCE_DIR}/db/repair.cc
${ROCKSDB_SOURCE_DIR}/db/seqno_to_time_mapping.cc
${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc
${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc
${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc
${ROCKSDB_SOURCE_DIR}/db/version_builder.cc
${ROCKSDB_SOURCE_DIR}/db/version_edit.cc
${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc
${ROCKSDB_SOURCE_DIR}/db/version_set.cc
${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc
${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc
${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns_helper.cc
${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
${ROCKSDB_SOURCE_DIR}/db/write_stall_stats.cc
${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
${ROCKSDB_SOURCE_DIR}/env/composite_env.cc
${ROCKSDB_SOURCE_DIR}/env/env.cc
${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
${ROCKSDB_SOURCE_DIR}/env/file_system.cc
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
${ROCKSDB_SOURCE_DIR}/file/filename.cc
${ROCKSDB_SOURCE_DIR}/file/line_file_reader.cc
${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc
${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc
${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc
${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc
${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc
${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc
${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc
${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc
${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc
${ROCKSDB_SOURCE_DIR}/memory/arena.cc
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
${ROCKSDB_SOURCE_DIR}/memory/memory_allocator.cc
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc
${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc
${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc
${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc
${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc
${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc
${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc
${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc
${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc
${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc
${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc
${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
${ROCKSDB_SOURCE_DIR}/options/configurable.cc
${ROCKSDB_SOURCE_DIR}/options/customizable.cc
${ROCKSDB_SOURCE_DIR}/options/db_options.cc
${ROCKSDB_SOURCE_DIR}/options/offpeak_time_info.cc
${ROCKSDB_SOURCE_DIR}/options/options.cc
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
${ROCKSDB_SOURCE_DIR}/port/mmap.cc
${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_cache.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc
${ROCKSDB_SOURCE_DIR}/table/format.cc
${ROCKSDB_SOURCE_DIR}/table/get_context.cc
${ROCKSDB_SOURCE_DIR}/table/iterator.cc
${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/compaction_merging_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/unique_id.cc
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc
${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_handler.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
${ROCKSDB_SOURCE_DIR}/util/async_file_reader.cc
${ROCKSDB_SOURCE_DIR}/util/cleanable.cc
${ROCKSDB_SOURCE_DIR}/util/coding.cc
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
${ROCKSDB_SOURCE_DIR}/util/compression.cc
${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
${ROCKSDB_SOURCE_DIR}/util/data_structure.cc
${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
${ROCKSDB_SOURCE_DIR}/util/hash.cc
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
${ROCKSDB_SOURCE_DIR}/util/random.cc
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
${ROCKSDB_SOURCE_DIR}/util/slice.cc
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
${ROCKSDB_SOURCE_DIR}/util/status.cc
${ROCKSDB_SOURCE_DIR}/util/stderr_logger.cc
${ROCKSDB_SOURCE_DIR}/util/string_util.cc
${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
${ROCKSDB_SOURCE_DIR}/util/udt_util.cc
${ROCKSDB_SOURCE_DIR}/util/write_batch_util.cc
${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc
${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load.cc
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
${ROCKSDB_SOURCE_DIR}/utilities/counted_fs.cc
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc
${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc
${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc
${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/keyrange.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/lock_request.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/locktree.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/range_buffer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/txnid_set.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/wfg.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
|
||||
build_version.cc)
|
||||
|
||||
if(HAVE_SSE42 AND NOT MSVC)
|
||||
set_source_files_properties(
|
||||
|
@ -533,15 +584,6 @@ list(APPEND SOURCES
|
|||
"${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")
|
||||
|
||||
if(WITH_FOLLY_DISTRIBUTED_MUTEX)
|
||||
list(APPEND SOURCES
|
||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp"
|
||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp"
|
||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp"
|
||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp"
|
||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp")
|
||||
endif()
|
||||
|
||||
set(ROCKSDB_STATIC_LIB rocksdb)
|
||||
|
||||
add_library(${ROCKSDB_STATIC_LIB} STATIC ${SOURCES})
|
||||
|
|
|
@ -0,0 +1,79 @@
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

#include <memory>

#include "rocksdb/version.h"
#include "rocksdb/utilities/object_registry.h"
#include "util/string_util.h"

// The build script may replace these values with real values based
// on whether or not GIT is available and the platform settings
static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:72438a678872544809393b831c7273794c074215";
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:main";
#define HAS_GIT_CHANGES 0
#if HAS_GIT_CHANGES == 0
// If HAS_GIT_CHANGES is 0, the GIT date is used.
// Use the time the branch/tag was last modified
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-12 16:01:57";
#else
// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
// Use the time the build was created.
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-13 17:15:50";
#endif

extern "C" {

} // extern "C"

std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {

};

namespace ROCKSDB_NAMESPACE {
static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {
size_t colon = name.find(":");
if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) {
// If we found a "@:", then this property was a build-time substitution that failed. Skip it
size_t at = name.find("@", colon);
if (at != colon + 1) {
// Everything before the colon is the name, after is the value
(*props)[name.substr(0, colon)] = name.substr(colon + 1);
}
}
}

static std::unordered_map<std::string, std::string>* LoadPropertiesSet() {
auto * properties = new std::unordered_map<std::string, std::string>();
AddProperty(properties, rocksdb_build_git_sha);
AddProperty(properties, rocksdb_build_git_tag);
AddProperty(properties, rocksdb_build_date);
return properties;
}

const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
static std::unique_ptr<std::unordered_map<std::string, std::string>> props(LoadPropertiesSet());
return *props;
}

std::string GetRocksVersionAsString(bool with_patch) {
std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR);
if (with_patch) {
return version + "." + std::to_string(ROCKSDB_PATCH);
} else {
return version;
}
}

std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true);
if (verbose) {
for (const auto& it : GetRocksBuildProperties()) {
info.append("\n ");
info.append(it.first);
info.append(": ");
info.append(it.second);
}
}
return info;
}
} // namespace ROCKSDB_NAMESPACE

@ -1,3 +0,0 @@
const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
const char* rocksdb_build_git_date = "rocksdb_build_git_date:2000-01-01";
const char* rocksdb_build_compile_date = "2000-01-01";
@ -158,3 +158,4 @@ external_catalog_mgr:
fdb:
cluster_file: /config/fdb/cluster
hdfs_addr: "hdfs://COMPOSE_PROJECT_NAME-hdfs-namenode:9000"
hdfs_nnproxy: ""

@ -157,3 +157,4 @@ external_catalog_mgr:
type: fdb
fdb:
cluster_file: /config/fdb/cluster
hdfs_nnproxy: ""

@ -19,4 +19,4 @@ tso_service:
send_timeout: 1800
tso_window_ms: 3000
tso_get_leader_info_interval_ms: 0
cnch_config: "/config/cnch-config.yml"
cnch_config: "/config/cnch-config.yml"

@ -19,4 +19,4 @@ tso_service:
send_timeout: 1800
tso_window_ms: 3000
tso_get_leader_info_interval_ms: 0
cnch_config: "/config/cnch-config.yml"
cnch_config: "/config/cnch-config.yml"

@ -20,4 +20,4 @@ tso_service:
send_timeout: 1800
tso_window_ms: 3000
tso_get_leader_info_interval_ms: 0
cnch_config: "/config/cnch-config.yml"
cnch_config: "/config/cnch-config.yml"
@ -1,6 +1,7 @@
set(CLICKHOUSE_COPIER_SOURCES
"${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp")

set (CLICKHOUSE_COPIER_LINK

@ -0,0 +1,10 @@
#include "ShardPartition.h"
#include "ShardPartitionPiece.h"

namespace DB {
ShardPartition::ShardPartition(TaskShard & parent, String name_quoted_, size_t number_of_splits)
    : task_shard(parent), name(std::move(name_quoted_))
{
    pieces.reserve(number_of_splits);
}
}

@ -12,8 +12,7 @@ namespace DB
/// This class describes a partition (name) that is stored on the shard (parent).
struct ShardPartition
{
ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits = 10)
: task_shard(parent), name(std::move(name_quoted_)) { pieces.reserve(number_of_splits); }
ShardPartition(TaskShard & parent, String name_quoted_, size_t number_of_splits = 10);

String getPartitionPath() const;

@ -24,6 +24,7 @@
#include "Aliases.h"
#include "Internals.h"
#include "ClusterPartition.h"
#include "TaskCluster.h"

#include <Core/Defines.h>

@ -381,7 +382,7 @@ template<typename RandomEngine>
inline void TaskTable::initShards(RandomEngine && random_engine)
{
const String & fqdn_name = getFQDNOrHostName();
std::uniform_int_distribution<UInt8> get_urand(0, std::numeric_limits<UInt8>::max());
std::uniform_int_distribution<uint8_t> get_urand(0, std::numeric_limits<uint8_t>::max());

// Compute the priority
for (const auto & shard_info : cluster_pull->getShardsInfo())

@ -163,11 +163,13 @@ void dumpMetadata(const std::string & key, const std::string & metadata)
std::cout << formatDataModel<DB::Protos::DataModelPart>(metadata) << std::endl;
else if (key.find("DLB_") != std::string::npos)
std::cout << formatDataModel<DB::Protos::DataModelDeleteBitmap>(metadata) << std::endl;
else
else
std::cout << metadata << std::endl;
}
else if (key.starts_with("MFSTS_"))
std::cout << formatDataModel<DB::Protos::ManifestListModel>(metadata) << std::endl;
else if (key.starts_with("SERVERS_TOPOLOGY"))
std::cout << formatDataModel<DB::Protos::DataModelTopologyVersions>(metadata) << std::endl;
else
std::cout << metadata << std::endl;
};

@ -434,4 +436,3 @@ int mainEntryClickhouseMetaInspector(int argc, char ** argv)
return 1;
}
}
@ -1141,7 +1141,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
global_context->setMacros(std::make_unique<Macros>(*config, "macros", log));
global_context->setExternalAuthenticatorsConfig(*config);

global_context->updateServerVirtualWarehouses(config);
global_context->updateCnchTopologyManager(*config);

/// Setup protection to avoid accidental DROP for big tables (that are greater than 50 GB by default)
if (config->has("max_table_size_to_drop"))

@ -1371,6 +1371,11 @@ int Server::main(const std::vector<std::string> & /*args*/)

HDFSConnectionParams hdfs_params = HDFSConnectionParams::parseHdfsFromConfig(global_context->getCnchConfigRef());
global_context->setHdfsConnectionParams(hdfs_params);

// pre lookup and cache consult result to avoid the overhead of lookupNNProxy
if (!hdfs_nnproxy.empty() && hdfs_params.conn_type == HDFSConnectionParams::CONN_NNPROXY)
lookupNNProxy(hdfs_nnproxy);

#endif
auto vetos_params = VETosConnectionParams::parseVeTosFromConfig(config());
global_context->setVETosConnectParams(vetos_params);

@ -1524,8 +1529,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
LOG_WARNING(log, "Disable cnch part cache, which is strongly suggested for product use, since disable it may bring significant performace issue.");
}

/// only server need start up server manager
global_context->setCnchServerManager(config());
/// only server need start up server leader
global_context->setCnchServerLeader(config());

// size_t masking_policy_cache_size = config().getUInt64("mark_cache_size", 128);
// size_t masking_policy_cache_lifetime = config().getUInt64("mark_cache_size_lifetime", 10000);
@ -3,7 +3,7 @@ macro(configure_rustc)
# NOTE: this can also be done by overriding rustc, but it not trivial with rustup.
set(RUST_CFLAGS "${CMAKE_C_FLAGS} -Wno-reserved-id-macro")

set(CXX_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxx/include")
set(CXX_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxx/include")
set(RUST_CXXFLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-id-macro -Wno-comma -Wno-shadow-field-in-constructor -isystem ${CXX_INCLUDE_DIR} -nostdinc++")

if (CMAKE_OSX_SYSROOT)

@ -81,10 +81,10 @@ private:
bool getEntityModelByNameAndType(const String & name, EntityType type, AccessEntityModel &model) const;
bool getEntryByUUID(const UUID &uuid, Entry &entry) const;
bool getEntryByNameAndType(const String &name, EntityType type, Entry &entry) const;
struct Entry * getEntryReferenceByUUID(const UUID &uuid) const TSA_REQUIRES(mutex);
struct Entry * getEntryReferenceByUUID(const UUID &uuid) const;

mutable std::unordered_map<UUID, Entry> entries_by_id[SHARD_CNT] TSA_GUARDED_BY(mutex);
mutable std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(EntityType::MAX)][SHARD_CNT] TSA_GUARDED_BY(mutex);
mutable std::unordered_map<UUID, Entry> entries_by_id[SHARD_CNT];
mutable std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(EntityType::MAX)][SHARD_CNT];
mutable std::mutex mutex;

mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)] TSA_GUARDED_BY(hdl_mutex);
@ -321,13 +321,6 @@ if (USE_PARQUET)
endif()

set (DBMS_COMMON_LIBRARIES)
# libgcc_s does not provide an implementation of an atomics library. Instead,
# GCC's libatomic library can be used to supply these when using libgcc_s.
if ((NOT USE_LIBCXX) AND COMPILER_CLANG AND OS_LINUX)
list (APPEND DBMS_COMMON_LIBRARIES atomic)
endif()

# hash functions
option(ENABLE_MULTITARGET_CODE "Enable platform-dependent code" ON)

@ -395,6 +388,14 @@ if (USE_EMBEDDED_COMPILER)
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
endif ()

if (ENABLE_PROJ)
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${PROJ_INCLUDE_DIR} ${SQLITE3_INCLUDE_DIR})
endif()

if (ENABLE_GDAL)
dbms_target_include_directories (SYSTEM BEFORE PUBLIC "${GDAL_INCLUDE_DIR}/include" "${GDAL_INCLUDE_DIR}/gcore" "${GDAL_INCLUDE_DIR}/port")
endif()

# Otherwise it will slow down stack traces printing too much.
set_source_files_properties(
Common/Elf.cpp

@ -442,6 +443,7 @@ target_link_libraries(clickhouse_common_io
libmetrics2
consistent-hashing
ch_contrib::re2
ch_contrib::llvm
PRIVATE
${UDNS_LIBRARIES}
)

@ -456,6 +458,7 @@ dbms_target_link_libraries(PUBLIC roaring)

if(USE_SIMDJSON)
dbms_target_link_libraries(PRIVATE simdjson)

endif()

if (USE_ZTI_LIBRARY)
@ -1873,10 +1873,12 @@ namespace Catalog
const TxnTimestamp & ts,
const Context * session_context,
const VisibilityLevel visibility,
const std::set<Int64> & bucket_numbers)
const std::set<Int64> & bucket_numbers,
const bool disable_cache)
{
ServerDataPartsWithDBM res;
res.first = getServerDataPartsInPartitions(storage, partitions, ts, session_context, VisibilityLevel::All, bucket_numbers);
res.first
    = getServerDataPartsInPartitions(storage, partitions, ts, session_context, VisibilityLevel::All, bucket_numbers, disable_cache);

if (res.first.empty())
return res;

@ -1884,7 +1886,13 @@ namespace Catalog
bool is_unique_table = storage->getInMemoryMetadataPtr()->hasUniqueKey();
if (is_unique_table)
res.second = getDeleteBitmapsInPartitions(
storage, {partitions.begin(), partitions.end()}, ts, /*session_context=*/nullptr, VisibilityLevel::All, bucket_numbers);
storage,
{partitions.begin(), partitions.end()},
ts,
/*session_context=*/nullptr,
VisibilityLevel::All,
bucket_numbers,
disable_cache);

/// Make sure they use the same records of transactions list.
if (ts && visibility != VisibilityLevel::All)

@ -1933,7 +1941,8 @@ namespace Catalog
const TxnTimestamp & ts,
const Context * session_context,
const VisibilityLevel visibility,
const std::set<Int64> & bucket_numbers)
const std::set<Int64> & bucket_numbers,
const bool disable_cache)
{
ServerDataPartsVector res;
String source;

@ -1973,7 +1982,7 @@ namespace Catalog
context.getServerType() == ServerType::cnch_server
&& isLocalServer(host_with_rpc, std::to_string(context.getRPCPort())))
{
bool can_use_cache = canUseCache(storage, session_context);
bool can_use_cache = canUseCache(storage, session_context, disable_cache);

if (!can_use_cache)
{

@ -2090,7 +2099,8 @@ namespace Catalog
const TxnTimestamp & ts,
const Context * session_context,
const VisibilityLevel visibility,
const std::set<Int64> & bucket_numbers)
const std::set<Int64> & bucket_numbers,
const bool disable_cache)
{
DeleteBitmapMetaPtrVector res;
String source;

@ -2133,7 +2143,7 @@ namespace Catalog
context.getServerType() == ServerType::cnch_server
&& isLocalServer(host_with_rpc, std::to_string(context.getRPCPort())))
{
bool can_use_cache = canUseCache(storage, session_context);
bool can_use_cache = canUseCache(storage, session_context, disable_cache);
can_use_cache &= !context.getConfigRef().getBool("disable_delete_bitmap_cache", false);

if (!can_use_cache)

@ -2723,8 +2733,10 @@ namespace Catalog
meta_proxy->setNonHostUpdateTimeStamp(name_space, UUIDHelpers::UUIDToString(storage->getStorageID().uuid), current_pts);
}

bool Catalog::canUseCache(const ConstStoragePtr & storage, const Context * session_context)
bool Catalog::canUseCache(const ConstStoragePtr & storage, const Context * session_context, const bool disable_cache)
{
if (disable_cache)
return false;
if (!context.getPartCacheManager())
return false;
if (context.getSettingsRef().server_write_ha)

@ -5650,42 +5662,9 @@ namespace Catalog
}

PartitionMetrics::PartitionMetricsStore Catalog::getPartitionMetricsStoreFromMetastore(
const String & table_uuid, const String & partition_id, size_t max_commit_time, std::function<bool()> need_abort)
const String & table_uuid, const String & partition_id, size_t max_commit_time, std::function<bool()>)
{
auto calculate_metrics_by_partition = [&](ServerDataPartsVector & parts) {
PartitionMetricsStorePtr res = std::make_shared<PartitionMetrics::PartitionMetricsStore>();

for (auto & part : parts)
{
if (unlikely(need_abort()))
{
LOG_WARNING(log, "getPartitionMetricsStoreFromMetastore is aborted by caller.");
break;
}

/// For those blocks only have deleted part, just ignore them because the covered part may be already removed by GC.
/// But we should still calculate it's `last_modification_time`.
res->updateLastModificationTime(part->part_model());
}

std::sort(parts.begin(), parts.end(), CnchPartsHelper::PartComparator<ServerDataPartPtr>{});
auto visible_parts = CnchPartsHelper::calcVisibleParts(parts, false);

for (auto & part : visible_parts)
{
if (unlikely(need_abort()))
{
LOG_WARNING(log, "getPartitionMetricsStoreFromMetastore is aborted by caller.");
break;
}

res->update(part->part_model());
}

return res;
};

PartitionMetrics::PartitionMetricsStore ret;
runWithMetricSupport(
[&] {

@ -5697,36 +5676,25 @@ namespace Catalog

/// Get latest table version.
StoragePtr storage = getTableByUUID(context, table_uuid, max_commit_time);
const auto & merge_tree_storage = dynamic_cast<const MergeTreeMetaBase &>(*storage);
const auto & merge_tree_storage = dynamic_cast<const StorageCnchMergeTree &>(*storage);

IMetaStore::IteratorPtr it = meta_proxy->getPartsInRange(name_space, table_uuid, partition_id);
/// Do not use cached parts, because this is not a user query.
ServerDataPartsWithDBM parts_with_dbm = getServerDataPartsInPartitionsWithDBM(
storage, {partition_id}, max_commit_time, nullptr, VisibilityLevel::Committed, {}, true);
LOG_TRACE(
log,
"getPartitionMetricsStoreFromMetastore for table {} partition {} get parts: {}, bitmaps: {}",
table_uuid,
partition_id,
parts_with_dbm.first.size(),
parts_with_dbm.second.size());

ServerDataPartsVector parts;
while (it->next())
if (parts_with_dbm.first.empty())
{
if (unlikely(need_abort()))
{
LOG_WARNING(log, "getPartitionMetricsStoreFromMetastore is aborted by caller.");
break;
}
Protos::DataModelPart part_model;
part_model.ParseFromString(it->value());

/// Skip the Uncommitted parts or the parts that
/// cannot be seen by the time `max_commit_time`.
if (part_model.commit_time() == 0 || part_model.commit_time() > max_commit_time)
{
LOG_TRACE(log, "Skip parts: {}, max_commit_time: {}", part_model.ShortDebugString(), max_commit_time);
continue;
}

parts.emplace_back(std::make_shared<ServerDataPart>(createPartWrapperFromModel(merge_tree_storage, std::move(part_model))));
return;
}

if (!parts.empty())
{
ret = *calculate_metrics_by_partition(parts);
}
ret = PartitionMetrics::PartitionMetricsStore(parts_with_dbm, merge_tree_storage);
},
ProfileEvents::GetPartitionMetricsFromMetastoreSuccess,
ProfileEvents::GetPartitionMetricsFromMetastoreFailed);

@ -313,7 +313,8 @@ public:
const TxnTimestamp & ts,
const Context * session_context,
VisibilityLevel visibility = VisibilityLevel::Visible,
const std::set<Int64> & bucket_numbers = {});
const std::set<Int64> & bucket_numbers = {},
bool disable_cache = false);

/// @param bucket_numbers If empty fetch all bucket_numbers, otherwise fetch the given bucket_numbers.
ServerDataPartsVector getServerDataPartsInPartitions(

@ -322,7 +323,8 @@ public:
const TxnTimestamp & ts,
const Context * session_context,
VisibilityLevel visibility = VisibilityLevel::Visible,
const std::set<Int64> & bucket_numbers = {});
const std::set<Int64> & bucket_numbers = {},
bool disable_cache = false);

ServerDataPartsWithDBM getTrashedPartsInPartitionsWithDBM(const ConstStoragePtr & storage, const Strings & partitions, const TxnTimestamp & ts);

@ -354,7 +356,8 @@ public:
const TxnTimestamp & ts,
const Context * session_context = nullptr,
VisibilityLevel visibility = VisibilityLevel::Visible,
const std::set<Int64> & bucket_numbers = {});
const std::set<Int64> & bucket_numbers = {},
bool disable_cache = false);
DeleteBitmapMetaPtrVector getDeleteBitmapsInPartitionsFromMetastore(
const ConstStoragePtr & storage, const Strings & partitions, const TxnTimestamp & ts, VisibilityLevel visibility = VisibilityLevel::Visible);
DeleteBitmapMetaPtrVector getTrashedDeleteBitmapsInPartitions(

@ -1016,7 +1019,7 @@ private:

void mayUpdateUHUT(const StoragePtr & storage);

bool canUseCache(const ConstStoragePtr & storage, const Context * session_context);
bool canUseCache(const ConstStoragePtr & storage, const Context * session_context, bool disable_cache);

void finishCommitInBatch(
const StoragePtr & storage,
@ -1,7 +1,7 @@
#include <Catalog/CatalogBackgroundTask.h>
#include <Catalog/MetastoreProxy.h>
#include <Catalog/LargeKVHandler.h>
#include <MergeTreeCommon/CnchServerManager.h>
#include <MergeTreeCommon/CnchServerLeader.h>

namespace DB

@ -65,7 +65,7 @@ void CatalogBackgroundTask::execute()
void CatalogBackgroundTask::cleanStaleLargeKV()
{
// only leader can execute clean job
if (!context->getCnchServerManager()->isLeader())
if (!context->getCnchServerLeader()->isLeader())
return;

// scan large kv records

@ -98,7 +98,7 @@ void CatalogBackgroundTask::cleanStaleLargeKV()
auto large_kv_it = metastore->getByPrefix(MetastoreProxy::largeKVDataPrefix(name_space, uuid));
while (large_kv_it->next())
batch_write.AddDelete(large_kv_it->key());

batch_write.AddDelete(MetastoreProxy::largeKVReferenceKey(name_space, uuid));

try

@ -76,6 +76,8 @@ class DataPartInterface
{
public:
virtual bool isServerDataPart() const = 0;
DataPartInterface() = default;
DataPartInterface(const DataPartInterface &) = default;
virtual ~DataPartInterface() = default;
};
@ -149,6 +149,7 @@ ManipulationTaskRecord::~ManipulationTaskRecord()
{
if (!try_execute && !parts.empty())
{
UInt64 source_parts_rows = 0;
std::lock_guard lock(parent.currently_merging_mutating_parts_mutex);
for (auto & part : parts)
{

@ -159,6 +160,13 @@ ManipulationTaskRecord::~ManipulationTaskRecord()
parent.currently_merging_mutating_parts.erase(prev_part->name());
prev_part = prev_part->tryGetPreviousPart();
}
source_parts_rows += part->rowsCount();
}
if (!parts.empty())
{
auto it = parent.merging_mutating_tasks_rows.try_emplace(parts.front()->info().partition_id, 0, 0).first;
it->second.first -= std::min(it->second.first, 1UL);
it->second.second -= std::min(it->second.second, source_parts_rows);
}
}

@ -199,6 +207,7 @@ FutureManipulationTask::~FutureManipulationTask()
{
if (!try_execute && !parts.empty())
{
UInt64 source_parts_rows = 0;
std::lock_guard lock(parent.currently_merging_mutating_parts_mutex);
for (auto & part : parts)
{

@ -209,6 +218,13 @@ FutureManipulationTask::~FutureManipulationTask()
parent.currently_merging_mutating_parts.erase(prev_part->name());
prev_part = prev_part->tryGetPreviousPart();
}
source_parts_rows += part->rowsCount();
}
if (!parts.empty())
{
auto it = parent.merging_mutating_tasks_rows.try_emplace(parts.front()->info().partition_id, 0, 0).first;
it->second.first -= std::min(it->second.first, 1UL);
it->second.second -= std::min(it->second.second, source_parts_rows);
}
}
}

@ -229,6 +245,7 @@ FutureManipulationTask & FutureManipulationTask::tagSourceParts(ServerDataPartsV

if (!record->try_execute)
{
UInt64 source_parts_rows = 0;
std::lock_guard lock(parent.currently_merging_mutating_parts_mutex);

for (const auto & p : parts_)

@ -241,6 +258,13 @@ FutureManipulationTask & FutureManipulationTask::tagSourceParts(ServerDataPartsV
check_and_add(prev_part->name());
prev_part = prev_part->tryGetPreviousPart();
}
source_parts_rows += p->rowsCount();
}
if (!parts_.empty())
{
auto it = parent.merging_mutating_tasks_rows.try_emplace(parts_.front()->info().partition_id, 0, 0).first;
it->second.first++;
it->second.second += source_parts_rows;
}
}

@ -672,7 +696,7 @@ bool CnchMergeMutateThread::tryMergeParts(StoragePtr & istorage, StorageCnchMerg
void removeUnselectableParts(
ServerDataPartsVector & visible_parts,
NameSet & merging_mutating_parts_snapshot,
std::multimap<String, UInt64> & unselectable_part_rows,
std::unordered_map<String, std::pair<UInt64, UInt64> > & unselectable_part_rows,
UInt64 max_bytes,
UInt64 max_rows)
{

@ -681,13 +705,18 @@ void removeUnselectableParts(
visible_parts.begin(),
visible_parts.end(),
[&merging_mutating_parts_snapshot, &unselectable_part_rows, max_bytes, max_rows](const auto & p) {
if (merging_mutating_parts_snapshot.erase(p->name())
|| p->part_model().rows_count() >= max_rows * 0.9
|| p->part_model().size() >= max_bytes * 0.9)
if (merging_mutating_parts_snapshot.erase(p->name()))
return true;

if (p->part_model().rows_count() >= max_rows * 0.9 || p->part_model().size() >= max_bytes * 0.9)
{
unselectable_part_rows.emplace(p->info().partition_id, p->part_model().rows_count());
auto it = unselectable_part_rows.try_emplace(p->info().partition_id, 0, 0).first;
it->second.first++;
it->second.second += p->part_model().rows_count();

return true;
}

return false;
}),
visible_parts.end()

@ -813,7 +842,7 @@ bool CnchMergeMutateThread::trySelectPartsToMerge(StoragePtr & istorage, Storage
/// TODO: support checkpoints

/// Used to calculate total rows of each partition so we can prevent generating huge merge tasks.
std::multimap<String, UInt64> unselectable_part_rows;
auto unselectable_part_rows = copyCurrentlyMergingMutatingTasksRows();

auto max_bytes = std::min(
storage_settings->cnch_merge_max_total_bytes_to_merge.value,

@ -833,14 +862,6 @@ bool CnchMergeMutateThread::trySelectPartsToMerge(StoragePtr & istorage, Storage
return false;
}
}
else
{
for (const auto & p: visible_parts)
{
if (merging_mutating_parts_snapshot.count(p->name()) > 0)
unselectable_part_rows.emplace(p->info().partition_id, p->part_model().rows_count());
}
}

metrics.num_legal_visible_parts = visible_parts.size();

@ -1143,9 +1164,9 @@ String CnchMergeMutateThread::triggerPartMerge(
mutation_timestamps.reserve(mutation_entries.size());
for (const auto & [_, mutation_entry] : mutation_entries)
mutation_timestamps.emplace_back(mutation_entry.commit_time, mutation_entry.commands.changeSchema());

/// Used to calculate total rows of each partition so we can prevent generating huge merge tasks.
std::multimap<String, UInt64> unselectable_part_rows;
auto unselectable_part_rows = copyCurrentlyMergingMutatingTasksRows();

auto & storage = checkAndGetCnchTable(istorage);
auto storage_settings = storage.getSettings();

@ -1162,14 +1183,6 @@ String CnchMergeMutateThread::triggerPartMerge(
auto max_rows = storage_settings->cnch_merge_max_total_rows_to_merge.value;
removeUnselectableParts(visible_parts, merging_mutating_parts_snapshot, unselectable_part_rows, max_bytes, max_rows);
}
else
{
for (const auto & p: visible_parts)
{
if (merging_mutating_parts_snapshot.count(p->name()) > 0)
unselectable_part_rows.emplace(p->info().partition_id, p->part_model().rows_count());
}
}

/// Step 4: create merge predicate
auto can_merge_callback = getMergePred(merging_mutating_parts_snapshot, mutation_timestamps);

@ -57,7 +57,7 @@ struct ManipulationTaskRecord

/// Set task_record's commit_start_time once it go into txn commit stage.
/// There are some other operations may be conflict with merge.
/// 1. DROP PARTITION - get the current max block id and generate a DropRange part.
/// 1. DROP PARTITION - get the current max block id and generate a DropRange part.
/// Need to cancel merge tasks before getting data parts.
/// 2. INGEST PARTITION - generate new content based on current source parts.
/// Need to cancel merge tasks and suspend the merge process before INGEST PARTITION finish.

@ -230,10 +230,18 @@ private:
return currently_merging_mutating_parts;
}

std::unordered_map<String, std::pair<UInt64, UInt64> > copyCurrentlyMergingMutatingTasksRows()
{
std::lock_guard lock(currently_merging_mutating_parts_mutex);
return merging_mutating_tasks_rows;
}

Strings removeLockedPartition(const Strings & partitions);

std::mutex currently_merging_mutating_parts_mutex;
NameSet currently_merging_mutating_parts;
/// partition_id -> {future_parts_number, future_part_rows}
std::unordered_map<String, std::pair<UInt64, UInt64> > merging_mutating_tasks_rows;

std::condition_variable currently_synchronous_tasks_cv; /// for waitTasksFinish function
std::mutex currently_synchronous_tasks_mutex;
@ -245,7 +245,7 @@ namespace
if (Operation::isRangeTombstone(prev))
{
/// Sort will place range tombstones consecutively at the beginning of each partition.
/// We'll record theri boundaries during iteration and process them when reaching partition end
/// We'll record their boundaries during iteration and process them when reaching partition end
if (range_tombstone_beg_it == end)
range_tombstone_beg_it = prev_it;
range_tombstone_end_it = std::next(prev_it);

@ -336,7 +336,8 @@ namespace
bool skip_drop_ranges,
Vec * visible_alone_drop_ranges,
Vec * invisible_dropped_parts,
LoggingOption logging)
LoggingOption logging,
Vec * invisible_parts = nullptr)
{
using Part = typename Vec::value_type;

@ -348,7 +349,10 @@ namespace
if (all_parts.size() == 1)
{
if (skip_drop_ranges && all_parts.front()->get_deleted())
; /// do nothing
{
if (invisible_parts)
*invisible_parts = all_parts;
}
else
visible_parts = all_parts;

@ -357,8 +361,8 @@ namespace
return visible_parts;
}

auto process_parts = [&](Vec & parts, size_t begin_pos, size_t end_pos, Vec & visible_parts_)
{
auto process_parts = [&](Vec & parts, size_t begin_pos, size_t end_pos, Vec & visible_parts_) {
/// NOTE! BSPScheduler relies on the same sort algorithm to estimate correct rows for table scan tasks
pdqsort(parts.begin() + begin_pos, parts.begin() + end_pos, PartComparator<Part>{});

/// One-pass algorithm to construct delta chains

@ -379,6 +383,9 @@ namespace
/// i) curr_part is also a DROP RANGE mark, and must be the bigger one
if ((*curr_it)->get_info().level == MergeTreePartInfo::MAX_LEVEL)
{
if (invisible_parts)
invisible_parts->push_back(*prev_it);

if (invisible_dropped_parts)
invisible_dropped_parts->push_back(*prev_it);

@ -395,6 +402,9 @@ namespace
/// ii) curr_part is marked as dropped by prev_part
else if ((*curr_it)->get_info().max_block <= prev_part->get_info().max_block)
{
if (invisible_parts)
invisible_parts->push_back(*curr_it);

if (invisible_dropped_parts)
invisible_dropped_parts->push_back(*curr_it);

@ -412,7 +422,10 @@ namespace
/// c) different partition

if (skip_drop_ranges)
; /// do nothing
{
if (invisible_parts)
invisible_parts->push_back(prev_part);
}
else
visible_parts_.push_back(prev_part);

@ -430,7 +443,11 @@ namespace
else
{
if (skip_drop_ranges && prev_part->get_deleted())
; /// do nothing
{
/// do nothing
if (invisible_parts)
invisible_parts->push_back(prev_part);
}
else
visible_parts_.push_back(prev_part);

@ -550,9 +567,10 @@ MergeTreeDataPartsVector calcVisibleParts(MergeTreeDataPartsVector & all_parts,
return calcVisiblePartsImpl<MergeTreeDataPartsVector>(all_parts, flatten, /* skip_drop_ranges */ true, nullptr, nullptr, logging);
}

ServerDataPartsVector calcVisibleParts(ServerDataPartsVector & all_parts, bool flatten, LoggingOption logging)
ServerDataPartsVector calcVisibleParts(ServerDataPartsVector & all_parts, bool flatten, LoggingOption logging, ServerDataPartsVector * invisible_parts)
{
return calcVisiblePartsImpl<ServerDataPartsVector>(all_parts, flatten, /* skip_drop_ranges */ true, nullptr, nullptr, logging);
return calcVisiblePartsImpl<ServerDataPartsVector>(
all_parts, flatten, /* skip_drop_ranges */ true, nullptr, nullptr, logging, invisible_parts);
}

MergeTreeDataPartsCNCHVector calcVisibleParts(MergeTreeDataPartsCNCHVector & all_parts, bool flatten, LoggingOption logging)

@ -114,7 +114,11 @@ IMergeTreeDataPartsVector toIMergeTreeDataPartsVector(const MergeTreeDataPartsCN
MergeTreeDataPartsCNCHVector toMergeTreeDataPartsCNCHVector(const IMergeTreeDataPartsVector & vec);

MergeTreeDataPartsVector calcVisibleParts(MergeTreeDataPartsVector & all_parts, bool flatten, LoggingOption logging = DisableLogging);
ServerDataPartsVector calcVisibleParts(ServerDataPartsVector & all_parts, bool flatten, LoggingOption logging = DisableLogging);
ServerDataPartsVector calcVisibleParts(
ServerDataPartsVector & all_parts,
bool flatten,
LoggingOption logging = DisableLogging,
ServerDataPartsVector * invisible_parts = nullptr);
MergeTreeDataPartsCNCHVector calcVisibleParts(MergeTreeDataPartsCNCHVector & all_parts, bool flatten, LoggingOption logging = DisableLogging);
IMergeTreeDataPartsVector calcVisibleParts(IMergeTreeDataPartsVector& all_parts,
bool collect_on_chain, bool skip_drop_ranges, IMergeTreeDataPartsVector* visible_alone_drop_ranges,
@ -932,7 +932,7 @@ std::set<UUID> CnchServerClient::getDeletingTablesInGlobalGC()
return res;
}

bool CnchServerClient::removeMergeMutateTasksOnPartitions(const StorageID & storage_id, const std::unordered_set<String> & partitions)
bool CnchServerClient::removeMergeMutateTasksOnPartitions(const StorageID & storage_id, const std::unordered_set<String> & partitions, UInt64 timeout_ms)
{
auto timer = ProfileEventsTimer(ProfileEvents::ServerRpcRequest, ProfileEvents::ServerRpcElaspsedMicroseconds);
brpc::Controller cntl;

@ -941,6 +941,8 @@ bool CnchServerClient::removeMergeMutateTasksOnPartitions(const StorageID & stor
for (const auto & p : partitions)
request.add_partitions(p);
Protos::RemoveMergeMutateTasksOnPartitionsResp response;
if (timeout_ms)
cntl.set_timeout_ms(std::min(timeout_ms, 30000ul));

stub->removeMergeMutateTasksOnPartitions(&cntl, &request, &response, nullptr);

@ -179,7 +179,7 @@ public:
*/
void cleanUndoBuffers(const TransactionRecord & txn_record);
std::set<UUID> getDeletingTablesInGlobalGC();
bool removeMergeMutateTasksOnPartitions(const StorageID &, const std::unordered_set<String> &);
bool removeMergeMutateTasksOnPartitions(const StorageID &, const std::unordered_set<String> &, UInt64 timeout_ms = 0);

void acquireLock(const LockInfoPtr & info);
void releaseLock(const LockInfoPtr & info);
@ -402,15 +402,12 @@ void CnchServerResource::sendResources(const ContextPtr & context, std::optional
if (all_resources.empty())
return;

if (resource_option)
initSourceTaskPayload(context, all_resources);

Stopwatch rpc_watch;
auto worker_group_status = context->getWorkerGroupStatusPtr();
auto handler = std::make_shared<ExceptionHandlerWithFailedInfo>();
if (worker_group_status && worker_group_status->needCheckHalfOpenWorker())
handler->setNeedRecord();

std::vector<brpc::CallId> call_ids;
call_ids.reserve(all_resources.size());

@ -671,6 +668,8 @@ void CnchServerResource::allocateResource(
if (auto it = assigned_map.find(host_ports.id); it != assigned_map.end())
{
assigned_parts = std::move(it->second);
if (resource_option)
initSourceTaskPayload(context, storage, host_ports, assigned_parts);
CnchPartsHelper::flattenPartsVector(assigned_parts);
LOG_TRACE(
log,

@ -759,36 +758,18 @@ void CnchServerResource::allocateResource(
}

void CnchServerResource::initSourceTaskPayload(
const ContextPtr & context, std::unordered_map<HostWithPorts, std::vector<AssignedResource>> & all_resources)
const ContextPtr & context, StoragePtr storage, const HostWithPorts & host_with_ports, ServerDataPartsVector & visible_parts)
{
for (const auto & [host_ports, assinged_resource] : all_resources)
auto uuid = storage->getStorageID().uuid;
for (const auto & p : visible_parts)
{
for (const auto & r : assinged_resource)
{
auto uuid = r.storage->getStorageID().uuid;
bool reclustered = r.storage->isTableClustered(context);
for (const auto & p : r.server_parts)
{
auto bucket_number = getBucketNumberOrInvalid(p->part_model_wrapper->bucketNumber(), reclustered);
auto addr = AddressInfo(host_ports.getHost(), host_ports.getTCPPort(), "", "", host_ports.exchange_port);
source_task_payload[uuid][addr].part_num += 1;
source_task_payload[uuid][addr].rows += p->rowExistsCount();
source_task_payload[uuid][addr].buckets.insert(bucket_number);
}
if (log->trace())
{
for (const auto & [addr, payload] : source_task_payload[uuid])
{
LOG_TRACE(
log,
"Source task payload for {}.{} addr:{} is {}",
r.storage->getDatabaseName(),
r.storage->getTableName(),
addr.toShortString(),
payload.toString());
}
}
}
bool reclustered = storage->isTableClustered(context);
auto bucket_number = getBucketNumberOrInvalid(p->part_model_wrapper->bucketNumber(), reclustered);
auto addr = AddressInfo(host_with_ports.getHost(), host_with_ports.getTCPPort(), "", "", host_with_ports.getRPCPort());
source_task_payload[uuid][addr].part_num += 1;
source_task_payload[uuid][addr].rows += p->rowExistsCount();
source_task_payload[uuid][addr].visible_parts.push_back(p);
source_task_payload[uuid][addr].buckets.insert(bucket_number);
}
}
}

@ -241,7 +241,7 @@ public:
}

private:
auto getLock() const
auto getLock() const TSA_NO_THREAD_SAFETY_ANALYSIS
{
return std::lock_guard(mutex);
}

@ -256,8 +256,8 @@ private:
std::lock_guard<std::mutex> &,
std::optional<ResourceOption> resource_option = std::nullopt);

void
initSourceTaskPayload(const ContextPtr & context, std::unordered_map<HostWithPorts, std::vector<AssignedResource>> & all_resources);
void initSourceTaskPayload(
const ContextPtr & context, StoragePtr storage, const HostWithPorts & host_with_ports, ServerDataPartsVector & visible_parts);

void sendCreateQueries(const ContextPtr & context);
void sendDataParts(const ContextPtr & context);
@ -73,7 +73,7 @@ private:
friend class CnchWorkerServiceImpl;
friend class CloudTablesBlockSource;

auto getLock() const { return std::lock_guard(mutex); }
auto getLock() const TSA_NO_THREAD_SAFETY_ANALYSIS { return std::lock_guard(mutex); }

using DatabaseAndTableName = std::pair<String, String>;
struct DatabaseAndTableNameHash

@ -84,6 +84,12 @@ namespace ProfileEvents
extern const Event PreloadExecTotalOps;
}

namespace CurrentMetrics
{
extern const Metric WorkerServicePoolTask;
extern const Metric WorkerServicePoolPendingTask;
}

namespace DB
{
namespace ErrorCodes

@ -101,8 +107,19 @@ namespace ErrorCodes
CnchWorkerServiceImpl::CnchWorkerServiceImpl(ContextMutablePtr context_)
: WithMutableContext(context_->getGlobalContext())
, log(getLogger("CnchWorkerService"))
, thread_pool(getNumberOfPhysicalCPUCores() * 4, getNumberOfPhysicalCPUCores() * 2, getNumberOfPhysicalCPUCores() * 8)
{
size_t thread_pool_size = getContext()->getConfigRef().getUInt("worker_service_thread_pool_size", 0);
if (!thread_pool_size)
{
thread_pool_size = getNumberOfPhysicalCPUCores() * 4;
LOG_INFO(log, "worker_service_thread_pool_size is not set, use default value {} based on available CPU cores", thread_pool_size);
}
if (thread_pool_size > 5000)
{
LOG_INFO(log, "Lowering worker_service_thread_pool_size to 5000, current value: {}", thread_pool_size);
thread_pool_size = 5000;
}
thread_pool = std::make_unique<ThreadPool>(thread_pool_size, thread_pool_size / 2, thread_pool_size * 2);
}

CnchWorkerServiceImpl::~CnchWorkerServiceImpl()

@ -110,7 +127,7 @@ CnchWorkerServiceImpl::~CnchWorkerServiceImpl()
try
{
LOG_TRACE(log, "Waiting local thread pool finishing");
thread_pool.wait();
thread_pool->wait();
}
catch (...)
{

@ -121,10 +138,12 @@ CnchWorkerServiceImpl::~CnchWorkerServiceImpl()
#define THREADPOOL_SCHEDULE(func) \
try \
{ \
thread_pool.scheduleOrThrowOnError(std::move(func)); \
CurrentMetrics::add(CurrentMetrics::WorkerServicePoolPendingTask); \
thread_pool->scheduleOrThrowOnError(std::move(func)); \
} \
catch (...) \
{ \
CurrentMetrics::sub(CurrentMetrics::WorkerServicePoolPendingTask); \
tryLogCurrentException(log, __PRETTY_FUNCTION__); \
RPCHelpers::handleException(response->mutable_exception()); \
done->Run(); \

@ -132,6 +151,8 @@ CnchWorkerServiceImpl::~CnchWorkerServiceImpl()

#define SUBMIT_THREADPOOL(...) \
auto _func = [=, this] { \
CurrentMetrics::sub(CurrentMetrics::WorkerServicePoolPendingTask); \
CurrentMetrics::Increment metric_increment{CurrentMetrics::WorkerServicePoolTask}; \
brpc::ClosureGuard done_guard(done); \
try \
{ \

@ -246,7 +246,7 @@ private:
// class PreloadHandler;
// std::shared_ptr<PreloadHandler> preload_handler;

ThreadPool thread_pool;
std::unique_ptr<ThreadPool> thread_pool;

std::mutex backup_lock;
std::unique_ptr<ThreadPool> backup_rpc_pool;
@ -1,6 +1,7 @@
|
|||
#pragma once
|
||||
#include <CloudServices/CnchServerResource.h>
|
||||
#include <CloudServices/CnchWorkerResource.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <Interpreters/WorkerStatusManager.h>
|
||||
#include <Protos/cnch_worker_rpc.pb.h>
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
|
@ -34,12 +35,43 @@ static void loadQueryResource(const T & query_resource, const ContextPtr & conte
|
|||
{
|
||||
static LoggerPtr log = getLogger("WorkerResource");
|
||||
LOG_TRACE(log, "Receiving resources for Session: {}", query_resource.txn_id());
|
||||
|
||||
struct Stats
|
||||
{
|
||||
UInt64 init_us;
|
||||
UInt64 ddl;
|
||||
UInt64 ddl_us;
|
||||
UInt64 parts;
|
||||
UInt64 vparts;
|
||||
UInt64 lakeparts;
|
||||
UInt64 files;
|
||||
UInt64 load_us;
|
||||
String toString() const
|
||||
{
|
||||
WriteBufferFromOwnString buf;
|
||||
buf << "init in " << init_us << ", " << ddl << " ddl in " << ddl_us;
|
||||
buf << ", load ";
|
||||
if (parts)
|
||||
buf << parts << " parts ";
|
||||
if (vparts)
|
||||
buf << vparts << " vparts ";
|
||||
if (lakeparts)
|
||||
buf << lakeparts << " lakeparts ";
|
||||
if (files)
|
||||
buf << files << " files ";
|
||||
buf << "in " << load_us;
|
||||
return buf.str();
|
||||
}
|
||||
};
|
||||
Stats stats{};
|
||||
|
||||
Stopwatch watch;
|
||||
auto session = context->acquireNamedCnchSession(query_resource.txn_id(), query_resource.timeout(), false);
|
||||
auto session_context = session->context;
|
||||
session_context->setTemporaryTransaction(query_resource.txn_id(), query_resource.primary_txn_id());
|
||||
if (query_resource.has_session_timezone())
|
||||
session_context->setSetting("session_timezone", query_resource.session_timezone());
|
||||
stats.init_us = watch.elapsedMicroseconds();
|
||||
|
||||
CurrentThread::QueryScope query_scope(session_context);
|
||||
auto worker_resource = session_context->getCnchWorkerResource();
|
||||
|
@ -72,15 +104,13 @@ static void loadQueryResource(const T & query_resource, const ContextPtr & conte
|
|||
object_columns);
|
||||
}
|
||||
create_timer.stop();
|
||||
LOG_INFO(
|
||||
log,
|
||||
"Prepared {} tables for session {} in {} us",
|
||||
query_resource.create_queries_size() + query_resource.cacheable_create_queries_size(),
|
||||
query_resource.txn_id(),
|
||||
create_timer.elapsedMicroseconds());
|
||||
ProfileEvents::increment(ProfileEvents::QueryCreateTablesMicroseconds, create_timer.elapsedMicroseconds());
|
||||
stats.ddl = query_resource.create_queries_size() + query_resource.cacheable_create_queries_size();
|
||||
stats.ddl_us = create_timer.elapsedMicroseconds();
|
||||
LOG_DEBUG(log, "Prepared {} tables for {} in {} us", stats.ddl, query_resource.txn_id(), stats.ddl_us);
|
||||
ProfileEvents::increment(ProfileEvents::QueryCreateTablesMicroseconds, stats.ddl_us);
|
||||
}
|
||||
|
||||
Stopwatch load_timer;
|
||||
bool lazy_load_parts = query_resource.has_lazy_load_data_parts() && query_resource.lazy_load_data_parts();
|
||||
for (const auto & data : query_resource.data_parts())
|
||||
{
|
||||
|
@ -126,6 +156,7 @@ static void loadQueryResource(const T & query_resource, const ContextPtr & conte
|
|||
}
|
||||
}
|
||||
|
||||
stats.parts += server_parts_size;
|
||||
cloud_merge_tree->receiveDataParts(std::move(server_parts));
|
||||
|
||||
LOG_DEBUG(
|
||||
|
@ -161,17 +192,18 @@ static void loadQueryResource(const T & query_resource, const ContextPtr & conte
|
|||
}
|
||||
}
|
||||
|
||||
cloud_merge_tree->receiveVirtualDataParts(std::move(virtual_parts));
|
||||
stats.vparts += virtual_parts_size;
|
||||
cloud_merge_tree->receiveVirtualDataParts(std::move(virtual_parts));
|
||||
|
||||
LOG_DEBUG(
|
||||
log,
|
||||
"Received {} virtual parts for table {}(txn_id: {}), disk_cache_mode {}, is_dict: {}, lazy_load_parts: {}",
|
||||
virtual_parts_size,
|
||||
cloud_merge_tree->getStorageID().getNameForLogs(),
|
||||
query_resource.txn_id(),
|
||||
query_resource.disk_cache_mode(),
|
||||
is_dict_table,
|
||||
lazy_load_parts);
|
||||
LOG_DEBUG(
|
||||
log,
|
||||
"Received {} virtual parts for table {}(txn_id: {}), disk_cache_mode {}, is_dict: {}, lazy_load_parts: {}",
|
||||
virtual_parts_size,
|
||||
cloud_merge_tree->getStorageID().getNameForLogs(),
|
||||
query_resource.txn_id(),
|
||||
query_resource.disk_cache_mode(),
|
||||
is_dict_table,
|
||||
lazy_load_parts);
|
||||
}
|
||||
|
||||
std::set<Int64> required_bucket_numbers;
|
||||
|
@ -200,11 +232,13 @@ static void loadQueryResource(const T & query_resource, const ContextPtr & conte
|
|||
{
|
||||
auto settings = hive_table->getSettings();
|
||||
auto lake_scan_infos = ILakeScanInfo::deserialize(data.lake_scan_info_parts(), context, storage->getInMemoryMetadataPtr(), *settings);
|
||||
stats.lakeparts += lake_scan_infos.size();
|
||||
hive_table->loadLakeScanInfos(lake_scan_infos);
|
||||
}
|
||||
else if (auto * cloud_file_table = dynamic_cast<IStorageCloudFile *>(storage.get()))
|
||||
{
|
||||
auto data_parts = createCnchFileDataParts(session_context, data.file_parts());
|
||||
stats.files += data_parts.size();
|
||||
cloud_file_table->loadDataParts(data_parts);
|
||||
|
||||
LOG_DEBUG(
|
||||
|
@ -216,6 +250,7 @@ static void loadQueryResource(const T & query_resource, const ContextPtr & conte
|
|||
else
|
||||
throw Exception("Unknown table engine: " + storage->getName(), ErrorCodes::UNKNOWN_TABLE);
|
||||
}
|
||||
stats.load_us += load_timer.elapsedMicroseconds();
|
||||
|
||||
std::unordered_map<String, UInt64> udf_infos;
|
||||
for (const auto & udf_info : query_resource.udf_infos())
|
||||
|
@ -225,7 +260,7 @@ static void loadQueryResource(const T & query_resource, const ContextPtr & conte
|
|||
}
|
||||
|
||||
watch.stop();
|
||||
LOG_INFO(log, "Load all resources for session {} in {} us.", query_resource.txn_id(), watch.elapsedMicroseconds());
|
||||
LOG_INFO(log, "Received all resources for {} in {} us: {}", query_resource.txn_id(), watch.elapsedMicroseconds(), stats.toString());
|
||||
ProfileEvents::increment(ProfileEvents::QueryLoadResourcesMicroseconds, watch.elapsedMicroseconds());
|
||||
}
|
||||
|
||||
|
|
|
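The resource-loading path above times each stage (session init, table creation, part loading) and folds the counts into a single Stats summary that is logged once at the end. A rough, self-contained sketch of that shape, using std::chrono in place of the project's Stopwatch; the names and numbers below are illustrative, not the real worker API.

#include <chrono>
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Minimal stand-in for the project's Stopwatch.
struct Stopwatch
{
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    uint64_t elapsedMicroseconds() const
    {
        return std::chrono::duration_cast<std::chrono::microseconds>(
                   std::chrono::steady_clock::now() - start).count();
    }
};

// Mirrors the idea of the Stats helper: accumulate per-stage numbers, print once.
struct Stats
{
    uint64_t init_us = 0, ddl_us = 0, load_us = 0;
    size_t ddl = 0, parts = 0;

    std::string toString() const
    {
        std::ostringstream buf;
        buf << ddl << " tables in " << ddl_us << " us, "
            << parts << " parts in " << load_us << " us (init " << init_us << " us)";
        return buf.str();
    }
};

int main()
{
    Stats stats;
    Stopwatch watch;

    stats.init_us = watch.elapsedMicroseconds();    // pretend session setup happened here

    Stopwatch create_timer;
    stats.ddl = 3;                                  // pretend 3 CREATE queries ran
    stats.ddl_us = create_timer.elapsedMicroseconds();

    Stopwatch load_timer;
    stats.parts = 128;                              // pretend 128 parts were received
    stats.load_us += load_timer.elapsedMicroseconds();

    std::cout << "Received all resources in " << watch.elapsedMicroseconds()
              << " us: " << stats.toString() << '\n';
}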
@ -23,14 +23,18 @@
|
|||
#include <Storages/MergeTree/MergeSelectorAdaptiveController.h>
|
||||
#include <Storages/StorageCnchMergeTree.h>
|
||||
|
||||
#include <boost/functional/hash.hpp>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
static void groupPartsByColumnsMutationsCommitTime(const ServerDataPartsVector & parts, std::vector<ServerDataPartsVector> & part_ranges);
|
||||
|
||||
ServerSelectPartsDecision selectPartsToMerge(
|
||||
const MergeTreeMetaBase & data,
|
||||
std::vector<ServerDataPartsVector> & res,
|
||||
const ServerDataPartsVector & data_parts,
|
||||
const std::multimap<String, UInt64> & unselectable_part_rows,
|
||||
const std::unordered_map<String, std::pair<UInt64, UInt64> > & unselectable_part_rows,
|
||||
ServerCanMergeCallback can_merge_callback,
|
||||
const SelectPartsToMergeSettings & settings,
|
||||
LoggerPtr log)
|
||||
|
@ -50,11 +54,12 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
bool aggressive = settings.aggressive;
|
||||
bool enable_batch_select = settings.enable_batch_select;
|
||||
bool final = settings.final;
|
||||
bool select_nonadjacent_parts_allowed = data_settings->cnch_merge_select_nonadjacent_parts.value;
|
||||
// bool merge_with_ttl_allowed = settings.merge_with_ttl_allowed
|
||||
|
||||
time_t current_time = std::time(nullptr);
|
||||
|
||||
IMergeSelector::PartsRanges parts_ranges;
|
||||
IMergeSelector<ServerDataPart>::PartsRanges parts_ranges;
|
||||
|
||||
/// StoragePolicyPtr storage_policy = data.getStoragePolicy(IStorage::StorageLocation::MAIN);
|
||||
/// Volumes with stopped merges are extremely rare situation.
|
||||
|
@ -79,93 +84,104 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
|
||||
for (auto & bucket: buckets)
|
||||
{
|
||||
const String * prev_partition_id = nullptr;
|
||||
/// Previous part only in boundaries of partition frame
|
||||
const ServerDataPartPtr * prev_part = nullptr;
|
||||
std::vector<ServerDataPartsVector> part_ranges_before_split;
|
||||
if (select_nonadjacent_parts_allowed)
|
||||
groupPartsByColumnsMutationsCommitTime(bucket.second, part_ranges_before_split);
|
||||
else
|
||||
part_ranges_before_split.emplace_back(std::move(bucket.second));
|
||||
|
||||
for (const auto & part : bucket.second)
|
||||
for (const auto & range_before_split: part_ranges_before_split)
|
||||
{
|
||||
const String & partition_id = part->info().partition_id;
|
||||
const String * prev_partition_id = nullptr;
|
||||
/// Previous part only in boundaries of partition frame
|
||||
const ServerDataPartPtr * prev_part = nullptr;
|
||||
|
||||
if (!prev_partition_id
|
||||
|| partition_id != *prev_partition_id
|
||||
|| (!parts_ranges.empty() && parts_ranges.back().size() >= max_parts_to_break))
|
||||
for (const auto & part : range_before_split)
|
||||
{
|
||||
if (parts_ranges.empty() || !parts_ranges.back().empty())
|
||||
parts_ranges.emplace_back();
|
||||
const String & partition_id = part->info().partition_id;
|
||||
|
||||
/// New partition frame.
|
||||
prev_partition_id = &partition_id;
|
||||
prev_part = nullptr;
|
||||
}
|
||||
/// If select_nonadjacent_parts_allowed is true, DanceMergeSelector will reorder parts by rows
|
||||
bool need_split_by_max_parts_to_break = !select_nonadjacent_parts_allowed
|
||||
&& !parts_ranges.empty() && parts_ranges.back().size() >= max_parts_to_break;
|
||||
|
||||
/// Check predicate only for the first part in each range.
|
||||
if (!prev_part)
|
||||
{
|
||||
/* Parts can be merged with themselves for TTL needs for example.
|
||||
* So we have to check if this part is currently being inserted with quorum and so on and so forth.
|
||||
* Obviously we have to check it manually only for the first part
|
||||
* of each partition because it will be automatically checked for a pair of parts. */
|
||||
if (!can_merge_callback(nullptr, part))
|
||||
continue;
|
||||
|
||||
/// This part can be merged only with next parts (no prev part exists), so start
|
||||
/// new interval if previous was not empty.
|
||||
if (!parts_ranges.back().empty())
|
||||
parts_ranges.emplace_back();
|
||||
}
|
||||
else
|
||||
{
|
||||
/// If we cannot merge with previous part we had to start new parts
|
||||
/// interval (in the same partition)
|
||||
if (!can_merge_callback(*prev_part, part))
|
||||
if (!prev_partition_id || partition_id != *prev_partition_id || need_split_by_max_parts_to_break)
|
||||
{
|
||||
/// Now we have no previous part
|
||||
if (parts_ranges.empty() || !parts_ranges.back().empty())
|
||||
parts_ranges.emplace_back();
|
||||
|
||||
/// New partition frame.
|
||||
prev_partition_id = &partition_id;
|
||||
prev_part = nullptr;
|
||||
}
|
||||
|
||||
/// Mustn't be empty
|
||||
assert(!parts_ranges.back().empty());
|
||||
|
||||
/// Some parts cannot be merged with previous parts and also cannot be merged with themselves,
|
||||
/// for example, merge is already assigned for such parts, or they participate in quorum inserts
|
||||
/// and so on.
|
||||
/// Also we don't start new interval here (maybe all next parts cannot be merged and we don't want to have empty interval)
|
||||
/// Check predicate only for the first part in each range.
|
||||
if (!prev_part)
|
||||
{
|
||||
/* Parts can be merged with themselves for TTL needs for example.
|
||||
* So we have to check if this part is currently being inserted with quorum and so on and so forth.
|
||||
* Obviously we have to check it manually only for the first part
|
||||
* of each partition because it will be automatically checked for a pair of parts. */
|
||||
if (!can_merge_callback(nullptr, part))
|
||||
continue;
|
||||
|
||||
/// Starting new interval in the same partition
|
||||
parts_ranges.emplace_back();
|
||||
/// This part can be merged only with next parts (no prev part exists), so start
|
||||
/// new interval if previous was not empty.
|
||||
if (!parts_ranges.back().empty())
|
||||
parts_ranges.emplace_back();
|
||||
}
|
||||
else
|
||||
{
|
||||
/// If we cannot merge with previous part we had to start new parts
|
||||
/// interval (in the same partition)
|
||||
if (!can_merge_callback(*prev_part, part))
|
||||
{
|
||||
/// Now we have no previous part
|
||||
prev_part = nullptr;
|
||||
|
||||
/// Mustn't be empty
|
||||
assert(!parts_ranges.back().empty());
|
||||
|
||||
/// Some parts cannot be merged with previous parts and also cannot be merged with themselves,
|
||||
/// for example, merge is already assigned for such parts, or they participate in quorum inserts
|
||||
/// and so on.
|
||||
/// Also we don't start new interval here (maybe all next parts cannot be merged and we don't want to have empty interval)
|
||||
if (!can_merge_callback(nullptr, part))
|
||||
continue;
|
||||
|
||||
/// Starting new interval in the same partition
|
||||
parts_ranges.emplace_back();
|
||||
}
|
||||
}
|
||||
|
||||
IMergeSelector<ServerDataPart>::Part part_info;
|
||||
part_info.size = part->part_model().size();
|
||||
time_t part_commit_time = TxnTimestamp(part->getCommitTime()).toSecond();
|
||||
auto p_part = part->tryGetPreviousPart();
|
||||
while (p_part)
|
||||
{
|
||||
++part_info.chain_depth;
|
||||
part_info.size += p_part->part_model().size();
|
||||
part_commit_time = TxnTimestamp(p_part->getCommitTime()).toSecond();
|
||||
p_part = p_part->tryGetPreviousPart();
|
||||
}
|
||||
/// Consider the base part's age as the part chain's age,
|
||||
/// so that the merge selector will give it a better score.
|
||||
part_info.age = current_time > part_commit_time ? current_time - part_commit_time : 0;
|
||||
part_info.rows = part->rowsCount();
|
||||
part_info.level = part->info().level;
|
||||
part_info.data = part.get();
|
||||
/// TODO:
|
||||
/// part_info.ttl_infos = &part->ttl_infos;
|
||||
/// part_info.compression_codec_desc = part->default_codec->getFullCodecDesc();
|
||||
/// part_info.shall_participate_in_merges = has_volumes_with_disabled_merges ? part->shallParticipateInMerges(storage_policy) : true;
|
||||
part_info.shall_participate_in_merges = true;
|
||||
|
||||
++parts_selected_precondition;
|
||||
|
||||
parts_ranges.back().emplace_back(part_info);
|
||||
|
||||
prev_part = ∂
|
||||
}
|
||||
|
||||
IMergeSelector::Part part_info;
|
||||
part_info.size = part->part_model().size();
|
||||
time_t part_commit_time = TxnTimestamp(part->getCommitTime()).toSecond();
|
||||
auto p_part = part->tryGetPreviousPart();
|
||||
while (p_part)
|
||||
{
|
||||
++part_info.chain_depth;
|
||||
part_info.size += p_part->part_model().size();
|
||||
part_commit_time = TxnTimestamp(p_part->getCommitTime()).toSecond();
|
||||
p_part = p_part->tryGetPreviousPart();
|
||||
}
|
||||
/// Consider the base part's age as the part chain's age,
|
||||
/// so that the merge selector will give it a better score.
|
||||
part_info.age = current_time > part_commit_time ? current_time - part_commit_time : 0;
|
||||
part_info.rows = part->rowsCount();
|
||||
part_info.level = part->info().level;
|
||||
part_info.data = ∂
|
||||
/// TODO:
|
||||
/// part_info.ttl_infos = &part->ttl_infos;
|
||||
/// part_info.compression_codec_desc = part->default_codec->getFullCodecDesc();
|
||||
/// part_info.shall_participate_in_merges = has_volumes_with_disabled_merges ? part->shallParticipateInMerges(storage_policy) : true;
|
||||
part_info.shall_participate_in_merges = true;
|
||||
|
||||
++parts_selected_precondition;
|
||||
|
||||
parts_ranges.back().emplace_back(part_info);
|
||||
|
||||
prev_part = ∂
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -179,7 +195,7 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
/*
|
||||
if (metadata_snapshot->hasAnyTTL() && merge_with_ttl_allowed && !ttl_merges_blocker.isCancelled())
|
||||
{
|
||||
IMergeSelector::PartsRange parts_to_merge;
|
||||
IMergeSelector<ServerDataPart>::PartsRange parts_to_merge;
|
||||
|
||||
/// TTL delete is preferred to recompression
|
||||
TTLDeleteMergeSelector delete_ttl_selector(
|
||||
|
@ -238,7 +254,7 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
merge_settings.min_parts_to_merge_base = 1;
|
||||
merge_settings.final = final;
|
||||
merge_settings.max_age_for_single_part_chain = data_settings->merge_with_ttl_timeout;
|
||||
merge_settings.select_nonadjacent_parts_allowed = data_settings->cnch_merge_select_nonadjacent_parts;
|
||||
merge_settings.select_nonadjacent_parts_allowed = select_nonadjacent_parts_allowed;
|
||||
auto merge_selector = std::make_unique<DanceMergeSelector>(merge_settings);
|
||||
|
||||
/// Using adaptive controller
|
||||
|
@ -252,9 +268,13 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
|
||||
if (expected_parts_number > 0)
|
||||
{
|
||||
UInt64 write_amplification_optimize_threshold = data_settings->cnch_merge_write_amplification_optimize_threshold.value;
|
||||
if (log)
|
||||
LOG_TRACE(log, "Using adaptive controller, expected_parts_number is {}", expected_parts_number);
|
||||
auto adaptive_controller = std::make_shared<MergeSelectorAdaptiveController>(
|
||||
data.isBucketTable(),
|
||||
expected_parts_number,
|
||||
write_amplification_optimize_threshold,
|
||||
merge_settings.max_parts_to_merge_base.value);
|
||||
adaptive_controller->init(bg_task_stats, parts_ranges, unselectable_part_rows);
|
||||
merge_selector->setAdaptiveController(adaptive_controller);
|
||||
|
@ -280,7 +300,7 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
{
|
||||
if (log)
|
||||
LOG_ERROR(log, "merge selector returned only one part to merge {}, skip this range.",
|
||||
(*static_cast<const ServerDataPartPtr *>(range.front().data))->name());
|
||||
static_cast<const ServerDataPart *>(range.front().data)->name());
|
||||
continue;
|
||||
}
|
||||
// throw Exception("Logical error: merge selector returned only one part to merge", ErrorCodes::LOGICAL_ERROR);
|
||||
|
@ -288,8 +308,8 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
auto & emplaced_parts = res.emplace_back();
|
||||
emplaced_parts.reserve(range.size());
|
||||
for (auto & part : range)
|
||||
emplaced_parts.push_back(*static_cast<const ServerDataPartPtr *>(part.data));
|
||||
|
||||
emplaced_parts.push_back(static_cast<const ServerDataPart *>(part.data)->shared_from_this());
|
||||
|
||||
/// When enable selct nonadjacent parts, merge selector can sort parts by rows/size/age to get a
|
||||
/// better selection. After selection, we need to sort parts again to get right result part name.
|
||||
if (data_settings->cnch_merge_select_nonadjacent_parts.value)
|
||||
|
@@ -315,4 +335,23 @@ void groupPartsByBucketNumber(const MergeTreeMetaBase & data, std::unordered_map
}
}

static void groupPartsByColumnsMutationsCommitTime(const ServerDataPartsVector & parts, std::vector<ServerDataPartsVector> & part_ranges)
{
    using GroupKeyType = std::pair<UInt64, UInt64>;
    std::unordered_map<GroupKeyType, ServerDataPartsVector, boost::hash<GroupKeyType> > grouped_ranges;

    for (const auto & p: parts)
    {
        GroupKeyType key = std::make_pair(p->getColumnsCommitTime(), p->getMutationCommitTime());
        auto it = grouped_ranges.try_emplace(key).first;
        it->second.emplace_back(p);
    }

    for (auto & [_, range]: grouped_ranges)
    {
        part_ranges.emplace_back();
        std::swap(range, part_ranges.back());
    }
}

}
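When cnch_merge_select_nonadjacent_parts is enabled, the selector above first buckets parts by (columns commit time, mutation commit time), so parts carrying different schema or mutation versions never land in the same merge range. A reduced, self-contained sketch of that grouping, with a hypothetical Part struct rather than the real ServerDataPart API:

#include <boost/functional/hash.hpp>
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Hypothetical stand-in for ServerDataPart; only the two commit times matter here.
struct Part
{
    std::string name;
    uint64_t columns_commit_time = 0;
    uint64_t mutation_commit_time = 0;
};

using Parts = std::vector<Part>;

// Same shape as groupPartsByColumnsMutationsCommitTime: parts share a candidate
// range only if both commit times match exactly.
static std::vector<Parts> groupByCommitTimes(const Parts & parts)
{
    using Key = std::pair<uint64_t, uint64_t>;
    std::unordered_map<Key, Parts, boost::hash<Key>> grouped;

    for (const auto & p : parts)
        grouped[{p.columns_commit_time, p.mutation_commit_time}].push_back(p);

    std::vector<Parts> ranges;
    for (auto & [key, range] : grouped)
        ranges.push_back(std::move(range));
    return ranges;
}

int main()
{
    Parts parts = {{"all_1_1_0", 1, 1}, {"all_2_2_0", 1, 1}, {"all_3_3_0", 2, 1}};
    std::cout << groupByCommitTimes(parts).size() << " ranges\n";   // prints "2 ranges"
}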
|
|
|
@ -46,7 +46,7 @@ ServerSelectPartsDecision selectPartsToMerge(
|
|||
const MergeTreeMetaBase & data,
|
||||
std::vector<ServerDataPartsVector> & res,
|
||||
const ServerDataPartsVector & data_parts,
|
||||
const std::multimap<String, UInt64> & unselectable_part_rows,
|
||||
const std::unordered_map<String, std::pair<UInt64, UInt64> > & unselectable_part_rows,
|
||||
ServerCanMergeCallback can_merge_callback,
|
||||
const SelectPartsToMergeSettings & settings,
|
||||
LoggerPtr log);
|
||||
|
|
|
@ -0,0 +1,241 @@
|
|||
#include <CloudServices/CnchCreateQueryHelper.h>
|
||||
#include <CloudServices/CnchPartsHelper.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <Common/tests/gtest_global_context.h>
|
||||
#include <Common/tests/gtest_global_register.h>
|
||||
#include <Catalog/Catalog.h>
|
||||
#include <Catalog/DataModelPartWrapper_fwd.h>
|
||||
#include <Storages/MergeTree/DeleteBitmapMeta.h>
|
||||
|
||||
namespace GTEST_Parts_Helper {
|
||||
|
||||
class CalcVisibility: public ::testing::Test
|
||||
{
|
||||
protected:
|
||||
static void SetUpTestSuite()
|
||||
{
|
||||
tryRegisterStorages();
|
||||
tryRegisterDisks();
|
||||
getContext().resetStoragePolicy();
|
||||
}
|
||||
};
|
||||
|
||||
using namespace DB;
|
||||
|
||||
DataModelPartPtr
|
||||
createPart(const String & partition_id, UInt64 min_block, UInt64 max_block, UInt64 level, UInt64 hint_mutation = 0)
|
||||
{
|
||||
DataModelPartPtr part_model = std::make_shared<Protos::DataModelPart>();
|
||||
Protos::DataModelPartInfo * info_model = part_model->mutable_part_info();
|
||||
|
||||
info_model->set_partition_id(partition_id);
|
||||
info_model->set_min_block(min_block);
|
||||
info_model->set_max_block(max_block);
|
||||
info_model->set_level(level);
|
||||
info_model->set_hint_mutation(hint_mutation);
|
||||
|
||||
part_model->set_rows_count(0);
|
||||
part_model->set_partition_minmax("xxxx");
|
||||
part_model->set_marks_count(0);
|
||||
part_model->set_size(818);
|
||||
|
||||
return part_model;
|
||||
}
|
||||
|
||||
DB::ServerDataPartPtr createServerDataPart(
|
||||
StoragePtr storage, const String & partition_id, size_t min_block, size_t max_block, size_t level, bool deleted, size_t commit_time, size_t hint_mutation)
|
||||
{
|
||||
auto part_model = createPart(partition_id, min_block, max_block, level, hint_mutation);
|
||||
part_model->set_deleted(deleted);
|
||||
part_model->set_commit_time(commit_time);
|
||||
|
||||
const auto & merge_tree = dynamic_cast<const MergeTreeMetaBase &>(*storage);
|
||||
|
||||
auto part = createPartWrapperFromModel(merge_tree, Protos::DataModelPart(*part_model));
|
||||
part->part_model->set_commit_time(commit_time);
|
||||
auto ret = std::make_shared<const DB::ServerDataPart>(DB::ServerDataPart(part));
|
||||
return ret;
|
||||
}
|
||||
|
||||
void checkParts(ServerDataPartsVector parts, ServerDataPartsVector expected)
|
||||
{
|
||||
sort(parts.begin(), parts.end(), [](const auto & lhs, const auto & rhs) { return lhs->name() < rhs->name(); });
|
||||
sort(expected.begin(), expected.end(), [](const auto & lhs, const auto & rhs) { return lhs->name() < rhs->name(); });
|
||||
if (parts.size() != expected.size())
|
||||
{
|
||||
std::cout << "given: " << std::endl;
|
||||
|
||||
for (const auto & part : parts)
|
||||
{
|
||||
std::cout << part->name() << " deleted? " << toString(part->deleted()) << " commit_time: " << toString(part->get_commit_time())
|
||||
<< " previous? " << part->get_info().hint_mutation << std::endl;
|
||||
}
|
||||
|
||||
std::cout << "expected: " << std::endl;
|
||||
|
||||
for (const auto & part : expected)
|
||||
{
|
||||
std::cout << part->name() << " deleted? " << toString(part->deleted()) << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
EXPECT_EQ(parts.size(), expected.size());
|
||||
for (size_t i = 0; i < parts.size(); i++) {
|
||||
EXPECT_EQ(parts[i]->name(), expected[i]->name());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void flattenParts(ServerDataPartsVector & parts) {
|
||||
for (int i = parts.size() - 1; i >= 0; i--)
|
||||
{
|
||||
auto cur = parts[i];
|
||||
while ((cur = cur->tryGetPreviousPart()))
|
||||
{
|
||||
parts.push_back(cur);
|
||||
}
|
||||
}
|
||||
|
||||
sort(parts.begin(), parts.end(), [](const auto & lhs, const auto & rhs) { return lhs->name() < rhs->name(); });
|
||||
}
|
||||
|
||||
TEST_F(CalcVisibility, Basic)
|
||||
{
|
||||
String query = "create table db.test UUID '61f0c404-5cb3-11e7-907b-a6006ad3dba0' (id Int32) ENGINE=CnchMergeTree order by id";
|
||||
StoragePtr storage = DB::createStorageFromQuery(query, getContext().context);
|
||||
|
||||
auto merge_tree_meta_base = std::dynamic_pointer_cast<DB::MergeTreeMetaBase>(storage);
|
||||
ASSERT_TRUE(merge_tree_meta_base != nullptr);
|
||||
|
||||
auto p = [&](String partition_id,
|
||||
size_t min_block,
|
||||
size_t max_block,
|
||||
size_t level,
|
||||
bool deleted,
|
||||
size_t commit_time,
|
||||
size_t hint_mutation = 0) {
|
||||
return createServerDataPart(storage, partition_id, min_block, max_block, level, deleted, commit_time, hint_mutation);
|
||||
};
|
||||
|
||||
{
|
||||
std::cout << "parts" << std::endl;
|
||||
// P <- Partial
|
||||
// P
|
||||
// P
|
||||
ServerDataPartsVector origin = {
|
||||
p("20230101", 1, 1, 0, false, 111),
|
||||
/// Partial part has higher level and commit_time.
|
||||
p("20230101", 1, 1, 1, false, 123, 111),
|
||||
p("20230101", 2, 2, 0, false, 222),
|
||||
p("20240101", 1, 1, 0, false, 123),
|
||||
};
|
||||
|
||||
|
||||
ServerDataPartsVector invisible;
|
||||
ServerDataPartsVector visible;
|
||||
visible = calcVisibleParts(origin, false, CnchPartsHelper::LoggingOption::DisableLogging, &invisible);
|
||||
|
||||
flattenParts(visible);
|
||||
flattenParts(invisible);
|
||||
|
||||
checkParts(visible, origin);
|
||||
checkParts(invisible, {});
|
||||
}
|
||||
|
||||
{
|
||||
std::cout << "drop range" << std::endl;
|
||||
// P ◄─┬─ DropRange
|
||||
// P ◄─┤
|
||||
// P ◄─┘
|
||||
// P
|
||||
// ---
|
||||
// DropRange
|
||||
ServerDataPartsVector origin = {
|
||||
p("20230101", 1, 1, 0, false, 112),
|
||||
p("20230101", 2, 2, 0, false, 222),
|
||||
p("20230101", 4, 8, 3, false, 234),
|
||||
p("20230101", 0, 10, MergeTreePartInfo::MAX_LEVEL, true, 235),
|
||||
p("20230101", 11, 11, 1, false, 236),
|
||||
p("20240101", 0, 10, MergeTreePartInfo::MAX_LEVEL, true, 235),
|
||||
};
|
||||
|
||||
ServerDataPartsVector invisible;
|
||||
ServerDataPartsVector visible;
|
||||
visible = calcVisibleParts(origin, false, CnchPartsHelper::LoggingOption::DisableLogging, &invisible);
|
||||
|
||||
flattenParts(visible);
|
||||
flattenParts(invisible);
|
||||
|
||||
checkParts(visible, {
|
||||
p("20230101", 11, 11, 1, false, 235),
|
||||
});
|
||||
checkParts(invisible, {
|
||||
p("20230101", 1, 1, 0, false, 112),
|
||||
p("20230101", 2, 2, 0, false, 222),
|
||||
p("20230101", 4, 8, 3, false, 234),
|
||||
p("20230101", 0, 10, MergeTreePartInfo::MAX_LEVEL, true, 235),
|
||||
p("20240101", 0, 10, MergeTreePartInfo::MAX_LEVEL, true, 235),
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
std::cout << "dropped part" << std::endl;
|
||||
// P ◄─ Dropped
|
||||
// P ◄─ Dropped
|
||||
ServerDataPartsVector origin = {
|
||||
p("20230101", 1, 1, 0, false, 111),
|
||||
p("20230101", 1, 1, 1, true, 222),
|
||||
p("20230101", 2, 2, 0, false, 111),
|
||||
p("20230101", 2, 2, 1, true, 222),
|
||||
};
|
||||
|
||||
ServerDataPartsVector invisible;
|
||||
ServerDataPartsVector visible;
|
||||
visible = calcVisibleParts(origin, false, CnchPartsHelper::LoggingOption::DisableLogging, &invisible);
|
||||
|
||||
flattenParts(invisible);
|
||||
flattenParts(visible);
|
||||
|
||||
checkParts(visible, {});
|
||||
checkParts(invisible, origin);
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
std::cout << "dropped part with merge" << std::endl;
|
||||
// P ◄─ Dropped ◄─┬─ P
|
||||
// P ◄─ Dropped ◄─┘
|
||||
ServerDataPartsVector origin = {
|
||||
p("20230101", 1, 1, 0, false, 111),
|
||||
p("20230101", 1, 1, 1, true, 222),
|
||||
p("20230101", 2, 2, 0, false, 111),
|
||||
p("20230101", 2, 2, 1, true, 222),
|
||||
p("20230101", 1, 2, 1, false, 222),
|
||||
};
|
||||
|
||||
ServerDataPartsVector invisible;
|
||||
ServerDataPartsVector visible;
|
||||
visible = calcVisibleParts(origin, false, CnchPartsHelper::LoggingOption::DisableLogging, &invisible);
|
||||
|
||||
flattenParts(visible);
|
||||
flattenParts(invisible);
|
||||
checkParts(
|
||||
visible,
|
||||
{
|
||||
p( "20230101", 1, 2, 1, false, 222),
|
||||
});
|
||||
checkParts(
|
||||
invisible,
|
||||
{
|
||||
p("20230101", 1, 1, 0, false, 111),
|
||||
p("20230101", 1, 1, 1, true, 222),
|
||||
p("20230101", 2, 2, 0, false, 111),
|
||||
p("20230101", 2, 2, 1, true, 222),
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@@ -47,9 +47,12 @@ struct SDConfiguration final : public SDConfigurationData

#define SDKV_CONFIG_FIELDS_LIST(M) \
    M(String, election_prefix, "election_prefix", "", ConfigFlag::Recommended, "common prefix for all election keys") \
    M(String, server_manager_host_path, "server_manager.host_path", "data.cnch.server-election", ConfigFlag::Recommended, "election key of server manager") \
    M(UInt64, server_manager_refresh_interval_ms, "server_manager.refresh_interval_ms", 1000, ConfigFlag::Default, "") \
    M(UInt64, server_manager_expired_interval_ms, "server_manager.expired_interval_ms", 5000, ConfigFlag::Default, "") \
    M(String, topology_maintainer_host_path, "server_manager.host_path", "data.cnch.server-election", ConfigFlag::Recommended, "election key of server manager, now is used by Resource Manager to manage topology. (Server can also do this, but disabled by default)") \
    M(UInt64, topology_maintainer_refresh_interval_ms, "server_manager.refresh_interval_ms", 1000, ConfigFlag::Default, "") \
    M(UInt64, topology_maintainer_expired_interval_ms, "server_manager.expired_interval_ms", 3000, ConfigFlag::Default, "") \
    M(String, server_leader_host_path, "server_leader.host_path", "data.cnch.server_leader-election", ConfigFlag::Recommended, "election key of server leader, used by server to elect a global leader.") \
    M(UInt64, server_leader_refresh_interval_ms, "server_leader.refresh_interval_ms", 1000, ConfigFlag::Default, "") \
    M(UInt64, server_leader_expired_interval_ms, "server_leader.expired_interval_ms", 3000, ConfigFlag::Default, "") \
    M(String, resource_manager_host_path, "resource_manager.host_path", "data.cnch.resource_manager-election", ConfigFlag::Recommended, "election key of resource manager") \
    M(UInt64, resource_manager_refresh_interval_ms, "resource_manager.refresh_interval_ms", 1000, ConfigFlag::Default, "") \
    M(UInt64, resource_manager_expired_interval_ms, "resource_manager.expired_interval_ms", 5000, ConfigFlag::Default, "") \
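This field list follows the usual X-macro pattern: each call site defines its own M(type, name, key, default, flag, comment) and then expands the list. A reduced sketch of how such a list can drive both the member declarations and an iteration over all fields (trimmed to four columns; the field names here are illustrative, not the real SDConfigurationData layout):

#include <cstdint>
#include <iostream>
#include <string>

// Reduced field list in the same shape: M(type, name, key, default)
#define CONFIG_FIELDS(M) \
    M(std::string, host_path, "server_manager.host_path", "data.cnch.server-election") \
    M(uint64_t, refresh_interval_ms, "server_manager.refresh_interval_ms", 1000)

struct Config
{
    // Expand once into member declarations with defaults...
#define DECLARE(TYPE, NAME, KEY, DEFAULT) TYPE NAME{DEFAULT};
    CONFIG_FIELDS(DECLARE)
#undef DECLARE

    // ...and once more into something that touches every field.
    void dump() const
    {
#define PRINT(TYPE, NAME, KEY, DEFAULT) std::cout << KEY << " = " << NAME << '\n';
        CONFIG_FIELDS(PRINT)
#undef PRINT
    }
};

int main()
{
    Config cfg;
    cfg.dump();   // prints both keys with their default values
}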
|
|
|
@ -168,6 +168,7 @@
|
|||
M(PartsCompact, "Compact parts.") \
|
||||
M(PartsInMemory, "In-memory parts.") \
|
||||
M(PartsCNCH, "CNCH parts.") \
|
||||
M(PartChecksums, "Data part checksums.") \
|
||||
M(MMappedFiles, "Total number of mmapped files.") \
|
||||
M(MMappedFileBytes, "Sum size of mmapped file regions.") \
|
||||
M(AsynchronousReadWait, "Number of threads waiting for asynchronous read.") \
|
||||
|
@ -177,6 +178,9 @@
|
|||
\
|
||||
M(StorageMemoryRows, "Memory table input rows") \
|
||||
M(StorageMemoryBytes, "Memory table input bytes") \
|
||||
\
|
||||
M(WorkerServicePoolTask, "Number of active tasks in worker service thread pool.") \
|
||||
M(WorkerServicePoolPendingTask, "Number of pending tasks in worker service thread pool.") \
|
||||
\
|
||||
M(CnchSDRequestsUpstream, "Number of Service Discovery requests to upstream") \
|
||||
\
|
||||
|
@ -216,7 +220,10 @@
|
|||
M(IOSchRawRequests, "RawRequests in deadline scheduler") \
|
||||
\
|
||||
M(IOUringPendingEvents, "Number of io_uring SQEs waiting to be submitted") \
|
||||
M(IOUringInFlightEvents, "Number of io_uring SQEs in flight")
|
||||
M(IOUringInFlightEvents, "Number of io_uring SQEs in flight") \
|
||||
\
|
||||
M(ActiveCnchSession, "Number of Cnch session in active") \
|
||||
M(ActiveHttpSession, "Number of Cnch session in active") \
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
|
|
|
@@ -6,6 +6,7 @@
#include <atomic>
#include <common/types.h>
#include <metric_helper.h>
#include <bits/types/time_t.h>
#include <Common/LabelledMetrics.h>

/** Allows to count number of simultaneously happening processes or current value of some metric.

@@ -53,9 +54,9 @@ namespace CurrentMetrics
inline void add(Metric metric, Value value = 1, Metrics::MetricType type = Metrics::MetricType::None, LabelledMetrics::MetricLabels labels = {})
{
    values[metric].fetch_add(value, std::memory_order_relaxed);
    if (type == Metrics::MetricType::Counter)
    if (type == Metrics::MetricType::Store)
    {
        Metrics::EmitCounter(getSnakeName(metric), value, LabelledMetrics::toString(labels));
        Metrics::EmitMetric(type, getSnakeName(metric), values[metric].load(std::memory_order_relaxed), LabelledMetrics::toString(labels), {});
    }
}
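The pattern in add() above is a relaxed atomic bump plus an optional emit to an external sink when the metric type asks for it. A self-contained sketch of that pattern with made-up names; emit_gauge below stands in for whatever exporter the real Metrics::EmitMetric wraps, which is not shown in this hunk:

#include <atomic>
#include <cstdint>
#include <iostream>
#include <string>

enum class MetricType { None, Store };

static std::atomic<int64_t> active_sessions{0};

// Stand-in for the real exporter call; just prints.
static void emit_gauge(const std::string & name, int64_t value)
{
    std::cout << name << " = " << value << '\n';
}

static void add(std::atomic<int64_t> & metric, int64_t value, MetricType type, const std::string & name)
{
    // The counter itself only needs relaxed ordering: no other data is published through it.
    metric.fetch_add(value, std::memory_order_relaxed);
    if (type == MetricType::Store)
        emit_gauge(name, metric.load(std::memory_order_relaxed));
}

int main()
{
    add(active_sessions, 1, MetricType::Store, "active_cnch_session");
    add(active_sessions, -1, MetricType::Store, "active_cnch_session");
}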
|
||||
|
|
|
@@ -43,10 +43,6 @@ std::string HostWithPorts::toDebugString() const
wb << " rpc/" << rpc_port;
if (tcp_port != 0)
    wb << " tcp/" << tcp_port;
if (exchange_port != 0)
    wb << " exc/" << exchange_port;
if (exchange_status_port != 0)
    wb << " exs/" << exchange_status_port;
if (real_id)
    wb << " real_id/" << *real_id;
wb << '}';

@@ -122,4 +118,9 @@ std::string truncateNetworkInterfaceIfHas(const std::string & s)
return s;
}

bool HostWithPorts::lessThan(const HostWithPorts & rhs) const
{
    return std::tie(id, host, rpc_port, tcp_port, http_port) < std::tie(rhs.id, rhs.host, rhs.rpc_port, rhs.tcp_port, rhs.http_port);
}

}
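lessThan() relies on std::tie: it builds tuples of references and compares them lexicographically in the listed order (id, then host, then the ports). A minimal sketch of the same idiom on a trimmed-down, hypothetical endpoint struct:

#include <cassert>
#include <cstdint>
#include <string>
#include <tuple>

// Hypothetical, trimmed-down endpoint; not the real HostWithPorts.
struct Endpoint
{
    std::string id;
    std::string host;
    uint16_t rpc_port{0};

    // std::tie makes a tuple of references; tuple::operator< compares the
    // fields lexicographically in the order they are listed.
    bool lessThan(const Endpoint & rhs) const
    {
        return std::tie(id, host, rpc_port) < std::tie(rhs.id, rhs.host, rhs.rpc_port);
    }
};

int main()
{
    Endpoint a{"s1", "10.0.0.1", 9000};
    Endpoint b{"s1", "10.0.0.2", 8124};
    assert(a.lessThan(b));   // "10.0.0.1" < "10.0.0.2" decides before the ports are looked at
}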
|
|
@ -174,17 +174,9 @@ class HostWithPorts
|
|||
{
|
||||
public:
|
||||
HostWithPorts() = default;
|
||||
HostWithPorts(const std::string & host_, uint16_t rpc_port_ = 0, uint16_t tcp_port_ = 0, uint16_t http_port_ = 0, [[maybe_unused]] uint16_t exchange_port_ = 0, [[maybe_unused]] uint16_t exchange_status_port_ = 0, std::string id_ = {})
|
||||
: host{removeBracketsIfIpv6(host_)},
|
||||
id{std::move(id_)},
|
||||
rpc_port{rpc_port_},
|
||||
tcp_port{tcp_port_},
|
||||
http_port{http_port_},
|
||||
exchange_port{rpc_port_},
|
||||
exchange_status_port{rpc_port_}
|
||||
HostWithPorts(const std::string & host_, uint16_t rpc_port_ = 0, uint16_t tcp_port_ = 0, uint16_t http_port_ = 0, std::string id_ = {})
|
||||
: host{removeBracketsIfIpv6(host_)}, id{std::move(id_)}, rpc_port{rpc_port_}, tcp_port{tcp_port_}, http_port{http_port_}
|
||||
{
|
||||
(void)exchange_port_;
|
||||
(void)exchange_status_port_;
|
||||
}
|
||||
|
||||
std::string host;
|
||||
|
@ -192,11 +184,8 @@ public:
|
|||
uint16_t rpc_port{0};
|
||||
uint16_t tcp_port{0};
|
||||
uint16_t http_port{0};
|
||||
uint16_t exchange_port{0};
|
||||
uint16_t exchange_status_port{0};
|
||||
PairInt64 topology_version = PairInt64{0, 0};
|
||||
std::optional<String> real_id;
|
||||
public:
|
||||
|
||||
bool empty() const { return host.empty() || (rpc_port == 0 && tcp_port == 0); }
|
||||
|
||||
|
@ -206,7 +195,11 @@ public:
|
|||
std::string getExchangeAddress() const { return getRPCAddress(); }
|
||||
std::string getExchangeStatusAddress() const { return getRPCAddress(); }
|
||||
|
||||
/// This is only use for judging equality for workers
|
||||
bool operator<(const HostWithPorts & rhs) const { return id < rhs.getId(); }
|
||||
/// This is only use for judging equality for servers.
|
||||
/// Same with `HostWithPorts::isExactlySame`
|
||||
bool lessThan(const HostWithPorts & rhs) const;
|
||||
const std::string & getHost() const { return host; }
|
||||
uint16_t getTCPPort() const { return tcp_port; }
|
||||
uint16_t getHTTPPort() const { return http_port; }
|
||||
|
@ -233,8 +226,7 @@ public:
|
|||
bool operator()(const HostWithPorts & lhs, const HostWithPorts & rhs) const
|
||||
{
|
||||
return lhs.id == rhs.id && isSameHost(lhs.host, rhs.host) && lhs.rpc_port == rhs.rpc_port && lhs.tcp_port == rhs.tcp_port
|
||||
&& lhs.http_port == rhs.http_port && lhs.exchange_port == rhs.exchange_port
|
||||
&& lhs.exchange_status_port == rhs.exchange_status_port;
|
||||
&& lhs.http_port == rhs.http_port;
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -483,6 +483,7 @@
|
|||
M(CnchLoadMarksRequests, "") \
|
||||
M(CnchLoadMarksBytes, "") \
|
||||
M(CnchLoadMarksMicroseconds, "") \
|
||||
M(CnchLoadChecksumsMicroseconds, "load checksums cost time") \
|
||||
M(CnchLoadMarksFromDiskCacheRequests, "") \
|
||||
M(CnchLoadMarksFromDiskCacheBytes, "") \
|
||||
M(CnchLoadMarksFromDiskCacheMicroseconds, "") \
|
||||
|
|
|
@@ -74,7 +74,7 @@ protected:
    TLDListsHolder();

    std::mutex tld_lists_map_mutex;
    Map tld_lists_map;
    Map tld_lists_map TSA_GUARDED_BY(tld_lists_map_mutex);
};

}
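TSA_GUARDED_BY here is the usual wrapper around clang's guarded_by thread-safety attribute, which lets -Wthread-safety flag accesses to tld_lists_map that are not made under tld_lists_map_mutex. A standalone sketch of the idea, defining a local macro instead of using the project's header; whether a given access actually warns also depends on the mutex type carrying capability annotations:

#include <map>
#include <mutex>
#include <string>

#if defined(__clang__)
#    define GUARDED_BY(x) __attribute__((guarded_by(x)))
#else
#    define GUARDED_BY(x)
#endif

class TldLists
{
public:
    void set(const std::string & name, int value)
    {
        std::lock_guard lock(map_mutex);   // access happens under the declared guard
        lists[name] = value;
    }

    // int getUnsafe(const std::string & name) { return lists[name]; }
    // ^ with an annotated mutex type, clang -Wthread-safety would flag this
    //   as reading 'lists' without holding 'map_mutex'.

private:
    std::mutex map_mutex;
    std::map<std::string, int> lists GUARDED_BY(map_mutex);
};

int main()
{
    TldLists t;
    t.set("example", 1);
}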
|
|
|
@ -70,25 +70,23 @@ TEST(HostWithPortsUtils, HostWithPortsGetAddress)
|
|||
constexpr uint16_t rpc_port = 9000;
|
||||
constexpr uint16_t tcp_port = 9001;
|
||||
constexpr uint16_t http_port = 9002;
|
||||
constexpr uint16_t exchange_port = 9003;
|
||||
constexpr uint16_t exchange_status_port = 9004;
|
||||
|
||||
HostWithPorts hp0 {"::1", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp0 {"::1", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hp0.getRPCAddress(), "[::1]:9000");
|
||||
EXPECT_EQ(hp0.getTCPAddress(), "[::1]:9001");
|
||||
EXPECT_EQ(hp0.getHTTPAddress(), "[::1]:9002");
|
||||
|
||||
HostWithPorts hp1 {"[::1]", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp1 {"[::1]", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hp1.getRPCAddress(), "[::1]:9000");
|
||||
EXPECT_EQ(hp1.getTCPAddress(), "[::1]:9001");
|
||||
EXPECT_EQ(hp1.getHTTPAddress(), "[::1]:9002");
|
||||
|
||||
HostWithPorts hp2 {"127.0.0.1", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp2 {"127.0.0.1", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hp2.getRPCAddress(), "127.0.0.1:9000");
|
||||
EXPECT_EQ(hp2.getTCPAddress(), "127.0.0.1:9001");
|
||||
EXPECT_EQ(hp2.getHTTPAddress(), "127.0.0.1:9002");
|
||||
|
||||
HostWithPorts hp3 {"www.google.com", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp3 {"www.google.com", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hp3.getRPCAddress(), "www.google.com:9000");
|
||||
EXPECT_EQ(hp3.getTCPAddress(), "www.google.com:9001");
|
||||
EXPECT_EQ(hp3.getHTTPAddress(), "www.google.com:9002");
|
||||
|
@ -112,22 +110,20 @@ TEST(HostWithPortsUtils, HostWithPortHash)
|
|||
constexpr uint16_t rpc_port = 9000;
|
||||
constexpr uint16_t tcp_port = 9001;
|
||||
constexpr uint16_t http_port = 9002;
|
||||
constexpr uint16_t exchange_port = 9003;
|
||||
constexpr uint16_t exchange_status_port = 9004;
|
||||
std::hash<DB::HostWithPorts> hasher;
|
||||
|
||||
HostWithPorts hp0 {"::1", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp0 {"::1", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hasher(hp0), hasher(hp0));
|
||||
HostWithPorts hp1 {"[::1]", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp1{"[::1]", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hasher(hp1), hasher(hp1));
|
||||
|
||||
HostWithPorts hp2 {"[1:1:3:1::166]", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp3 {"1:1:3:1::166", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp2 {"[1:1:3:1::166]", rpc_port, tcp_port, http_port, ""};
|
||||
HostWithPorts hp3 {"1:1:3:1::166", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hasher(hp2), hasher(hp3));
|
||||
EXPECT_EQ(hasher(hp2), hasher(hp2));
|
||||
EXPECT_EQ(hasher(hp3), hasher(hp3));
|
||||
|
||||
HostWithPorts hp4 {"10.1.1.1", rpc_port, tcp_port, http_port, exchange_port, exchange_status_port, ""};
|
||||
HostWithPorts hp4 {"10.1.1.1", rpc_port, tcp_port, http_port, ""};
|
||||
EXPECT_EQ(hasher(hp4), hasher(hp4));
|
||||
}
|
||||
|
||||
|
|
|
@ -1043,7 +1043,7 @@ INSTANTIATE_TEST_SUITE_P(RandomInt,
|
|||
::testing::Combine(
|
||||
DefaultCodecsToTest,
|
||||
::testing::Values(
|
||||
generateSeq<UInt8 >(G(RandomGenerator<UInt8>(0))),
|
||||
generateSeq<UInt8 >(G(RandomGenerator<uint8_t>(0))),
|
||||
generateSeq<UInt16>(G(RandomGenerator<UInt16>(0))),
|
||||
generateSeq<UInt32>(G(RandomGenerator<UInt32>(0, 0, 1000'000'000))),
|
||||
generateSeq<UInt64>(G(RandomGenerator<UInt64>(0, 0, 1000'000'000)))
|
||||
|
|
|
@ -298,7 +298,7 @@ enum PreloadLevelSettings : UInt64
|
|||
M(Bool, optimize_distributed_group_by_sharding_key, false, "Optimize GROUP BY sharding_key queries (by avoiding costly aggregation on the initiator server).", 0) \
|
||||
M(UInt64, optimize_skip_unused_shards_limit, 1000, "Limit for number of sharding key values, turns off optimize_skip_unused_shards if the limit is reached", 0) \
|
||||
M(Bool, distributed_perfect_shard, false, "Whether to enable aggregation finished in worker side, to avoid merge aggregation states in coordinator", 0) \
|
||||
M(VWLoadBalancing, vw_load_balancing, VWLoadBalancing::RANDOM, "Which worker group to select according to prority of worker groups(random in_order reverse_order).", 0) \
|
||||
M(VWLoadBalancing, vw_load_balancing, VWLoadBalancing::IN_ORDER, "Which worker group to select according to prority of worker groups(random in_order reverse_order).", 0) \
|
||||
M(Bool, fallback_perfect_shard, true, "Whether to fallback if there is any exception", 0) \
|
||||
M(Bool, optimize_skip_unused_shards, false, "Assumes that data is distributed by sharding_key. Optimization to skip unused shards if SELECT query filters by sharding_key.", 0) \
|
||||
M(Bool, optimize_skip_unused_shards_rewrite_in, true, "Rewrite IN in query for remote shards to exclude values that does not belong to the shard (requires optimize_skip_unused_shards)", 0) \
|
||||
|
@ -1420,7 +1420,7 @@ enum PreloadLevelSettings : UInt64
|
|||
M(Bool, enable_distinct_remove, true, "Whether to eliminate redundancy during execution", 0) \
|
||||
M(Bool, enable_single_distinct_to_group_by, true, "Whether enable convert single count distinct to group by", 0) \
|
||||
M(Bool, enable_mark_distinct_optimzation, false, "Whether enable Mark distinct optimization", 0) \
|
||||
M(Bool, enable_expand_distinct_optimization, true, "Whether enable rewrite distinct optimization", 0) \
|
||||
M(Bool, enable_expand_distinct_optimization, false, "Whether enable rewrite distinct optimization", 0) \
|
||||
M(ExpandMode, expand_mode, ExpandMode::EXPAND, "Rewrite distinct optimization, Expand Mode : EXPAND|UNION|CTE", 0) \
|
||||
M(Bool, enable_common_predicate_rewrite, true, "Whether enable common predicate rewrite", 0) \
|
||||
M(Bool, enable_common_join_predicate_rewrite, true, "Whether enable common predicate rewrite", 0) \
|
||||
|
|
|
@@ -137,10 +137,12 @@ bool isMapImplicitFileNameOfSpecialMapName(const String file_name, const String
bool isMapImplicitDataFileNameOfSpecialMapName(const String & file_name, const String map_col);

/// Get range of files from ordered files (e.g. std::map) with a hacking solution.
template <class V>
std::pair<typename std::map<String, V>::const_iterator, typename std::map<String, V>::const_iterator>
getFileRangeFromOrderedFilesByPrefix(const String & prefix, const std::map<String, V> & m)
template <class M>
std::pair<typename M::const_iterator, typename M::const_iterator>
getFileRangeFromOrderedFilesByPrefix(const String & prefix, const M & m)
{
    static_assert(std::is_same_v<String, typename M::key_type>);

    constexpr auto char_max = std::numeric_limits<String::value_type>::max();
    auto beg = m.lower_bound(prefix);
    /// Adding char_max to the end to speed up finding upper bound

@@ -152,10 +154,12 @@ getFileRangeFromOrderedFilesByPrefix(const String & prefix, const std::map<Strin
}

/// Get range of implicit column files from ordered files (e.g. std::map) with a hacking solution
template <class V>
std::pair<typename std::map<String, V>::const_iterator, typename std::map<String, V>::const_iterator>
getMapColumnRangeFromOrderedFiles(const String & map_column, const std::map<String, V> & m)
template <class M>
std::pair<typename M::const_iterator, typename M::const_iterator>
getMapColumnRangeFromOrderedFiles(const String & map_column, const M & m)
{
    static_assert(std::is_same_v<String, typename M::key_type>);

    String map_prefix = escapeForFileName(getMapKeyPrefix(map_column));
    return getFileRangeFromOrderedFilesByPrefix(map_prefix, m);
}
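The prefix trick above relies on the container being ordered by its String key: lower_bound(prefix) is the first candidate, and appending the maximum char value to the prefix gives a cheap bound for the end of the range. The rest of the function sits outside this hunk, but the idea can be shown on a plain std::map (illustrative only; like the original "hacking solution", it assumes keys whose bytes after the prefix stay within the char range, which holds for ASCII file names):

#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <utility>

// Return [first, last) iterators of all entries whose key starts with `prefix`.
template <class M>
std::pair<typename M::const_iterator, typename M::const_iterator>
rangeByPrefix(const std::string & prefix, const M & m)
{
    constexpr auto char_max = std::numeric_limits<std::string::value_type>::max();
    auto beg = m.lower_bound(prefix);
    auto end = m.upper_bound(prefix + char_max);   // every ASCII key with this prefix sorts before it
    return {beg, end};
}

int main()
{
    std::map<std::string, int> files = {
        {"col_a.bin", 1}, {"col_a.mrk", 2}, {"col_b.bin", 3}};

    auto [beg, end] = rangeByPrefix("col_a", files);
    for (auto it = beg; it != end; ++it)
        std::cout << it->first << '\n';   // col_a.bin, col_a.mrk
}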
|
|
@ -159,12 +159,12 @@ private:
|
|||
|
||||
#ifdef USE_EMBEDDED_COMPILER
|
||||
protected:
|
||||
bool isCompilableImpl(const DataTypes & types) const override
|
||||
bool isCompilableImpl(const DataTypes & types) const
|
||||
{
|
||||
return Impl::isCompilable(types);
|
||||
}
|
||||
|
||||
llvm::Value * compileImpl(llvm::IRBuilderBase & b, const DataTypes & types, Values values, JITContext & ) const override
|
||||
llvm::Value * compileImpl(llvm::IRBuilderBase & b, const DataTypes & types, Values values, JITContext & ) const
|
||||
{
|
||||
WhichDataType which_data_type(types[0]);
|
||||
if (which_data_type.isString())
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Functions/FunctionStringOrArrayToT.h>
|
||||
#include <common/map.h>
|
||||
#include <llvm/llvm/include/llvm/IR/IRBuilder.h>
|
||||
#include <llvm-project/llvm/include/llvm/IR/IRBuilder.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
|
|
@ -1943,7 +1943,7 @@ namespace
|
|||
else
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::template jodaLiteral<String>, default_literal));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::template jodaLiteral<String>, default_literal));
|
||||
instructions.push_back(std::move(instruction));
|
||||
}
|
||||
};
|
||||
|
@ -1962,7 +1962,7 @@ namespace
|
|||
{
|
||||
Instruction<T> instruction;
|
||||
std::string_view literal(cur_token, 1);
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::template jodaLiteral<decltype(literal)>, literal));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::template jodaLiteral<decltype(literal)>, literal));
|
||||
instructions.push_back(std::move(instruction));
|
||||
++reserve_size;
|
||||
pos += 2;
|
||||
|
@ -1979,7 +1979,7 @@ namespace
|
|||
{
|
||||
Instruction<T> instruction;
|
||||
std::string_view literal(cur_token + i, 1);
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::template jodaLiteral<decltype(literal)>, literal));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::template jodaLiteral<decltype(literal)>, literal));
|
||||
instructions.push_back(std::move(instruction));
|
||||
++reserve_size;
|
||||
if (*(cur_token + i) == '\'')
|
||||
|
@ -2003,7 +2003,7 @@ namespace
|
|||
case 'G':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaEra, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaEra, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
reserve_size += repetitions <= 3 ? 2 : 13;
|
||||
break;
|
||||
|
@ -2011,7 +2011,7 @@ namespace
|
|||
case 'C':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaCenturyOfEra, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaCenturyOfEra, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Year range [1900, 2299]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
|
@ -2020,7 +2020,7 @@ namespace
|
|||
case 'Y':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaYearOfEra, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaYearOfEra, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Year range [1900, 2299]
|
||||
reserve_size += repetitions == 2 ? 2 : std::max(repetitions, 4);
|
||||
|
@ -2029,7 +2029,7 @@ namespace
|
|||
case 'x':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaWeekYear, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaWeekYear, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// weekyear range [1900, 2299]
|
||||
reserve_size += std::max(repetitions, 4);
|
||||
|
@ -2038,7 +2038,7 @@ namespace
|
|||
case 'w':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaWeekOfWeekYear, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaWeekOfWeekYear, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Week of weekyear range [1, 52]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
|
@ -2047,7 +2047,7 @@ namespace
|
|||
case 'e':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaDayOfWeek1Based, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaDayOfWeek1Based, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Day of week range [1, 7]
|
||||
reserve_size += std::max(repetitions, 1);
|
||||
|
@ -2056,7 +2056,7 @@ namespace
|
|||
case 'E':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaDayOfWeekText, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaDayOfWeekText, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Maximum length of short name is 3, maximum length of full name is 9.
|
||||
reserve_size += repetitions <= 3 ? 3 : 9;
|
||||
|
@ -2065,7 +2065,7 @@ namespace
|
|||
case 'y':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaYear, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaYear, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Year range [1900, 2299]
|
||||
reserve_size += repetitions == 2 ? 2 : std::max(repetitions, 4);
|
||||
|
@ -2074,7 +2074,7 @@ namespace
|
|||
case 'D':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaDayOfYear, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaDayOfYear, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Day of year range [1, 366]
|
||||
reserve_size += std::max(repetitions, 3);
|
||||
|
@ -2085,7 +2085,7 @@ namespace
|
|||
if (repetitions <= 2)
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaMonthOfYear, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaMonthOfYear, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Month of year range [1, 12]
|
||||
reserve_size += 2;
|
||||
|
@ -2093,7 +2093,7 @@ namespace
|
|||
else
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaMonthOfYearText, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaMonthOfYearText, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Maximum length of short name is 3, maximum length of full name is 9.
|
||||
reserve_size += repetitions <= 3 ? 3 : 9;
|
||||
|
@ -2103,7 +2103,7 @@ namespace
|
|||
case 'd':
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaDayOfMonth, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaDayOfMonth, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Day of month range [1, 3]
|
||||
reserve_size += std::max(repetitions, 3);
|
||||
|
@ -2111,45 +2111,45 @@ namespace
|
|||
}
|
||||
case 'a':
|
||||
/// Default half day of day is "AM"
|
||||
add_instruction(bind_front(&Instruction<T>::jodaHalfDayOfDay, repetitions), "AM");
|
||||
add_instruction(std::bind_front(&Instruction<T>::jodaHalfDayOfDay, repetitions), "AM");
|
||||
reserve_size += 2;
|
||||
break;
|
||||
case 'K':
|
||||
/// Default hour of half day is 0
|
||||
add_instruction(
|
||||
bind_front(&Instruction<T>::jodaHourOfHalfDay, repetitions), padValue(0, repetitions));
|
||||
std::bind_front(&Instruction<T>::jodaHourOfHalfDay, repetitions), padValue(0, repetitions));
|
||||
/// Hour of half day range [0, 11]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
break;
|
||||
case 'h':
|
||||
/// Default clock hour of half day is 12
|
||||
add_instruction(
|
||||
bind_front(&Instruction<T>::jodaClockHourOfHalfDay, repetitions),
|
||||
std::bind_front(&Instruction<T>::jodaClockHourOfHalfDay, repetitions),
|
||||
padValue(12, repetitions));
|
||||
/// Clock hour of half day range [1, 12]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
break;
|
||||
case 'H':
|
||||
/// Default hour of day is 0
|
||||
add_instruction(bind_front(&Instruction<T>::jodaHourOfDay, repetitions), padValue(0, repetitions));
|
||||
add_instruction(std::bind_front(&Instruction<T>::jodaHourOfDay, repetitions), padValue(0, repetitions));
|
||||
/// Hour of day range [0, 23]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
break;
|
||||
case 'k':
|
||||
/// Default clock hour of day is 24
|
||||
add_instruction(bind_front(&Instruction<T>::jodaClockHourOfDay, repetitions), padValue(24, repetitions));
|
||||
add_instruction(std::bind_front(&Instruction<T>::jodaClockHourOfDay, repetitions), padValue(24, repetitions));
|
||||
/// Clock hour of day range [1, 24]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
break;
|
||||
case 'm':
|
||||
/// Default minute of hour is 0
|
||||
add_instruction(bind_front(&Instruction<T>::jodaMinuteOfHour, repetitions), padValue(0, repetitions));
|
||||
add_instruction(std::bind_front(&Instruction<T>::jodaMinuteOfHour, repetitions), padValue(0, repetitions));
|
||||
/// Minute of hour range [0, 59]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
break;
|
||||
case 's':
|
||||
/// Default second of minute is 0
|
||||
add_instruction(bind_front(&Instruction<T>::jodaSecondOfMinute, repetitions), padValue(0, repetitions));
|
||||
add_instruction(std::bind_front(&Instruction<T>::jodaSecondOfMinute, repetitions), padValue(0, repetitions));
|
||||
/// Second of minute range [0, 59]
|
||||
reserve_size += std::max(repetitions, 2);
|
||||
break;
|
||||
|
@ -2157,7 +2157,7 @@ namespace
|
|||
{
|
||||
/// Default fraction of second is 0
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaFractionOfSecond, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaFractionOfSecond, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// 'S' repetitions range [0, 9]
|
||||
reserve_size += repetitions <= 9 ? repetitions : 9;
|
||||
|
@ -2169,7 +2169,7 @@ namespace
|
|||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Short name time zone is not yet supported");
|
||||
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::jodaTimezone, repetitions));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::jodaTimezone, repetitions));
|
||||
instructions.push_back(std::move(instruction));
|
||||
/// Longest length of full name of time zone is 32.
|
||||
reserve_size += 32;
|
||||
|
@ -2184,7 +2184,7 @@ namespace
|
|||
|
||||
Instruction<T> instruction;
|
||||
std::string_view literal(cur_token, pos - cur_token);
|
||||
instruction.setJodaFunc(bind_front(&Instruction<T>::template jodaLiteral<decltype(literal)>, literal));
|
||||
instruction.setJodaFunc(std::bind_front(&Instruction<T>::template jodaLiteral<decltype(literal)>, literal));
|
||||
instructions.push_back(std::move(instruction));
|
||||
reserve_size += pos - cur_token;
|
||||
break;
|
||||
|
|
|
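The change in the date/time formatting code above is mechanical: the unqualified bind_front calls are replaced by C++20's std::bind_front, which binds the leading arguments (including the member-function pointer and object) and forwards whatever the call site supplies later. A small standalone illustration with a made-up Instruction type, not the real one from this file:

#include <functional>
#include <iostream>
#include <string>

struct Instruction
{
    // Writes `repetitions` copies of `tag` followed by the value.
    void emit(int repetitions, const std::string & tag, int value) const
    {
        for (int i = 0; i < repetitions; ++i)
            std::cout << tag;
        std::cout << value << '\n';
    }
};

int main()
{
    Instruction instr;
    // Bind the member function, the object and the first two arguments;
    // the call site only supplies the remaining `value` argument.
    auto func = std::bind_front(&Instruction::emit, &instr, 2, std::string("y"));
    func(2024);   // prints "yy2024"
}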
@ -3,12 +3,10 @@
|
|||
#include <Functions/FunctionHelpers.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeLowCardinality.h>
|
||||
#include <Columns/ColumnConst.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <Columns/ColumnTuple.h>
|
||||
#include <Columns/ColumnSet.h>
|
||||
#include <Columns/ColumnLowCardinality.h>
|
||||
#include <Interpreters/Set.h>
|
||||
|
||||
|
||||
|
@ -69,12 +67,6 @@ public:
|
|||
return 2;
|
||||
}
|
||||
|
||||
/// Do not use default implementation for LowCardinality.
|
||||
/// For now, Set may be const or non const column, depending on how it was created.
|
||||
/// But we will return UInt8 for any case.
|
||||
/// TODO: we could use special implementation later.
|
||||
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
|
||||
{
|
||||
return std::make_shared<DataTypeUInt8>();
|
||||
|
@ -138,35 +130,16 @@ public:
|
|||
else
|
||||
columns_of_key_columns.insert(left_arg);
|
||||
|
||||
/// Replace single LowCardinality column to it's dictionary if possible.
|
||||
ColumnPtr lc_indexes = nullptr;
|
||||
if (columns_of_key_columns.columns() == 1)
|
||||
{
|
||||
auto & arg = columns_of_key_columns.safeGetByPosition(0);
|
||||
const auto * col = arg.column.get();
|
||||
if (const auto * const_col = typeid_cast<const ColumnConst *>(col))
|
||||
col = &const_col->getDataColumn();
|
||||
|
||||
if (const auto * lc = typeid_cast<const ColumnLowCardinality *>(col))
|
||||
{
|
||||
if (!lc->isFullState())
|
||||
{
|
||||
lc_indexes = lc->getIndexesPtr();
|
||||
arg.column = lc->getDictionary().getNestedColumn();
|
||||
}
|
||||
else
|
||||
{
|
||||
arg.column = lc->getNestedColumnPtr();
|
||||
}
|
||||
arg.type = removeLowCardinality(arg.type);
|
||||
}
|
||||
}
|
||||
|
||||
auto res = set->execute(columns_of_key_columns, negative);
|
||||
|
||||
if (lc_indexes)
|
||||
return res->index(*lc_indexes, 0);
|
||||
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
|
|
@ -73,28 +73,28 @@ struct LengthUTF8Impl
|
|||
{
|
||||
throw Exception("Cannot apply function lengthUTF8 to IPv4 argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
}
|
||||
|
||||
static bool isCompilable(const DataTypes & )
|
||||
|
||||
[[maybe_unused]] static bool isCompilable(const DataTypes & )
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static llvm::Value * compileString(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
[[maybe_unused]] static llvm::Value * compileString(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
static llvm::Value * compileFixedString(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
[[maybe_unused]] static llvm::Value * compileFixedString(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
static llvm::Value * compileArray(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
[[maybe_unused]] static llvm::Value * compileArray(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
static llvm::Value * compileMap(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
[[maybe_unused]] static llvm::Value * compileMap(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
static llvm::Value * compileUuid(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
[[maybe_unused]] static llvm::Value * compileUuid(llvm::IRBuilderBase & , const DataTypes & , Values & )
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
|
|
@ -99,8 +99,9 @@
|
|||
#include <Interpreters/VirtualWarehouseQueue.h>
|
||||
#include <Interpreters/WorkerGroupHandle.h>
|
||||
#include <Interpreters/WorkerStatusManager.h>
|
||||
#include <MergeTreeCommon/CnchServerManager.h>
|
||||
#include <MergeTreeCommon/CnchServerLeader.h>
|
||||
#include <MergeTreeCommon/CnchServerTopology.h>
|
||||
#include <MergeTreeCommon/CnchTopologyManager.h>
|
||||
#include <MergeTreeCommon/CnchTopologyMaster.h>
|
||||
#include <MergeTreeCommon/GlobalDataManager.h>
|
||||
#include <Optimizer/OptimizerMetrics.h>
|
||||
|
@ -427,7 +428,8 @@ struct ContextSharedPart
|
|||
mutable ServiceDiscoveryClientPtr sd;
|
||||
mutable PartCacheManagerPtr cache_manager; /// Manage cache of parts for cnch tables.
|
||||
mutable std::shared_ptr<Catalog::Catalog> cnch_catalog;
|
||||
mutable CnchServerManagerPtr server_manager;
|
||||
mutable CnchServerLeaderPtr server_manager;
|
||||
mutable CnchTopologyManagerPtr topology_manager;
|
||||
mutable CnchTopologyMasterPtr topology_master;
|
||||
mutable ResourceManagerClientPtr rm_client;
|
||||
mutable std::unique_ptr<VirtualWarehousePool> vw_pool;
|
||||
|
@ -595,6 +597,9 @@ struct ContextSharedPart
|
|||
if (server_manager)
|
||||
server_manager->shutDown();
|
||||
|
||||
if (topology_manager)
|
||||
topology_manager->shutDown();
|
||||
|
||||
if (topology_master)
|
||||
topology_master->shutDown();
|
||||
|
||||
|
@ -3722,7 +3727,7 @@ HostWithPorts Context::getHostWithPorts() const
|
|||
id = host;
|
||||
|
||||
return HostWithPorts{
|
||||
std::move(host), getRPCPort(), getTCPPort(), getHTTPPort(), getExchangePort(), getExchangeStatusPort(), std::move(id)};
|
||||
std::move(host), getRPCPort(), getTCPPort(), getHTTPPort(), std::move(id)};
|
||||
};
|
||||
|
||||
static HostWithPorts cache = get_host_with_port();
|
||||
|
@ -5354,16 +5359,16 @@ DaemonManagerClientPtr Context::getDaemonManagerClient() const
|
|||
return shared->daemon_manager_pool->get();
|
||||
}
|
||||
|
||||
void Context::setCnchServerManager(const Poco::Util::AbstractConfiguration & config)
|
||||
void Context::setCnchServerLeader([[maybe_unused]] const Poco::Util::AbstractConfiguration & configs)
|
||||
{
|
||||
auto lock = getLock(); // checked
|
||||
if (shared->server_manager)
|
||||
throw Exception("Server manager has been already created.", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
shared->server_manager = std::make_shared<CnchServerManager>(shared_from_this(), config);
|
||||
shared->server_manager = std::make_shared<CnchServerLeader>(shared_from_this());
|
||||
}
|
||||
|
||||
std::shared_ptr<CnchServerManager> Context::getCnchServerManager() const
|
||||
std::shared_ptr<CnchServerLeader> Context::getCnchServerLeader() const
|
||||
{
|
||||
auto lock = getLock(); // checked
|
||||
if (!shared->server_manager)
|
||||
|
@ -5372,15 +5377,26 @@ std::shared_ptr<CnchServerManager> Context::getCnchServerManager() const
|
|||
return shared->server_manager;
|
||||
}
|
||||
|
||||
void Context::updateServerVirtualWarehouses(const ConfigurationPtr & config)
|
||||
void Context::updateCnchTopologyManager(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
std::shared_ptr<CnchServerManager> server_manager;
|
||||
/// For server, config.cnch_enable_server_topology_manager will control
|
||||
/// whether to enable topology manager. (disabled by default)
|
||||
if (getApplicationType() == ApplicationType::SERVER)
|
||||
{
|
||||
auto lock = getLock(); // checked
|
||||
server_manager = shared->server_manager;
|
||||
auto lock = getLock();
|
||||
if (config.getBool("cnch_enable_server_topology_manager", false))
|
||||
{
|
||||
if (!shared->topology_manager)
|
||||
shared->topology_manager = std::make_shared<CnchTopologyManager>(shared_from_this(), config);
|
||||
/// Always update server virtual warehouses.
|
||||
shared->topology_manager->updateServerVirtualWarehouses(config);
|
||||
}
|
||||
else
|
||||
{
|
||||
/// If topology manager is disabled, we need to clear it.
|
||||
shared->topology_manager = nullptr;
|
||||
}
|
||||
}
|
||||
if (server_manager)
|
||||
server_manager->updateServerVirtualWarehouses(*config);
|
||||
}
|
||||
|
||||
void Context::setCnchTopologyMaster()
|
||||
|
|
|
@ -277,9 +277,10 @@ class DeleteBitmapCache;
|
|||
class PartCacheManager;
|
||||
class IServiceDiscovery;
|
||||
using ServiceDiscoveryClientPtr = std::shared_ptr<IServiceDiscovery>;
|
||||
class CnchTopologyMaster;
|
||||
class CnchServerTopology;
|
||||
class CnchServerManager;
|
||||
class CnchServerLeader;
|
||||
class CnchTopologyManager;
|
||||
class CnchTopologyMaster;
|
||||
struct RootConfiguration;
|
||||
class TxnTimestamp;
|
||||
class TransactionCoordinatorRcCnch;
|
||||
|
@ -825,7 +826,7 @@ public:
|
|||
void checkAccess(const AccessRightsElements & elements) const;
|
||||
|
||||
|
||||
bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const;
|
||||
bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const;
|
||||
bool isGranted(const AccessFlags & flags, const StorageID & table_id, const std::string_view & column) const;
|
||||
|
||||
void grantAllAccess();
|
||||
|
@ -1618,9 +1619,9 @@ public:
|
|||
void initDaemonManagerClientPool(const String & service_name);
|
||||
DaemonManagerClientPtr getDaemonManagerClient() const;
|
||||
|
||||
void setCnchServerManager(const Poco::Util::AbstractConfiguration & config);
|
||||
std::shared_ptr<CnchServerManager> getCnchServerManager() const;
|
||||
void updateServerVirtualWarehouses(const ConfigurationPtr & config);
|
||||
void setCnchServerLeader(const Poco::Util::AbstractConfiguration & config);
|
||||
std::shared_ptr<CnchServerLeader> getCnchServerLeader() const;
|
||||
void updateCnchTopologyManager(const Poco::Util::AbstractConfiguration & config);
|
||||
void setCnchTopologyMaster();
|
||||
std::shared_ptr<CnchTopologyMaster> getCnchTopologyMaster() const;
|
||||
|
||||
|
@ -1806,7 +1807,7 @@ private:
|
|||
|
||||
template <typename... Args>
|
||||
void checkAccessImpl(const Args &... args) const;
|
||||
|
||||
|
||||
template <typename... Args>
|
||||
bool isGrantedImpl(const Args &... args) const;
|
||||
|
||||
|
|
|
@ -5,7 +5,11 @@
|
|||
#include <limits>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <Catalog/DataModelPartWrapper_fwd.h>
|
||||
#include <CloudServices/CnchPartsHelper.h>
|
||||
#include <CloudServices/CnchServerResource.h>
|
||||
#include <Interpreters/DistributedStages/SourceTask.h>
|
||||
#include <MergeTreeCommon/assignCnchParts.h>
|
||||
#include <bthread/mutex.h>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Common/CurrentThread.h>
|
||||
|
@ -31,6 +35,7 @@
|
|||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <utility>
|
||||
#include <pdqsort.h>
|
||||
#include <CloudServices/CnchServerResource.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
|
@ -711,6 +716,57 @@ void BSPScheduler::sendResources(PlanSegment * plan_segment_ptr)
|
|||
}
|
||||
}
|
||||
|
||||
std::unordered_map<UUID, SourceTaskStat> BSPScheduler::createSourceTaskStats(
|
||||
PlanSegment * plan_segment_ptr, const SegmentTaskInstance & instance, const SourceTaskFilter & source_task_filter)
|
||||
{
|
||||
std::unordered_map<UUID, SourceTaskStat> source_task_stats;
|
||||
const auto & source_task_payload_map = query_context->getCnchServerResource()->getSourceTaskPayload();
|
||||
AddressInfo addr;
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(node_selector_result_mutex);
|
||||
addr = node_selector_result[instance.segment_id].worker_nodes[instance.parallel_index].address;
|
||||
}
|
||||
for (const auto & plan_segment_input : plan_segment_ptr->getPlanSegmentInputs())
|
||||
{
|
||||
auto storage_id = plan_segment_input->getStorageID();
|
||||
if (storage_id && storage_id->hasUUID())
|
||||
{
|
||||
if (auto iter = source_task_payload_map.find(storage_id->uuid); iter != source_task_payload_map.end())
|
||||
{
|
||||
auto source_task_payload_iter = iter->second.find(addr);
|
||||
auto [iiter, _] = source_task_stats.emplace(storage_id->uuid, SourceTaskStat(storage_id.value(), 0));
|
||||
if (source_task_payload_iter != iter->second.end())
|
||||
{
|
||||
auto visible_parts = source_task_payload_iter->second.visible_parts;
|
||||
/// pdqsort is unstable, but all visible parts have distinct ids under CnchPartsHelper::PartComparator,
/// so worker and server will produce the same order
|
||||
/// refer to calcVisiblePartsImpl for more details
|
||||
pdqsort(visible_parts.begin(), visible_parts.end(), CnchPartsHelper::PartComparator<ServerDataPartPtr>{});
|
||||
filterParts(visible_parts, source_task_filter);
|
||||
for (const auto & part : visible_parts)
|
||||
{
|
||||
iiter->second.rows += part->rowExistsCount();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (log->trace())
|
||||
{
|
||||
for (const auto & [_, stat] : source_task_stats)
|
||||
{
|
||||
LOG_TRACE(
|
||||
log,
|
||||
"SourceTaskStats(table:{}) of segment instance({}_{}) contains {} rows",
|
||||
stat.storage_id.getFullTableName(),
|
||||
instance.segment_id,
|
||||
instance.parallel_index,
|
||||
stat.rows);
|
||||
}
|
||||
}
|
||||
return source_task_stats;
|
||||
}
|
||||
|
||||
void BSPScheduler::prepareTask(PlanSegment * plan_segment_ptr, NodeSelectorResult & selector_info, const SegmentTask & task)
|
||||
{
|
||||
// Register exchange for all outputs.
|
||||
|
@ -762,6 +818,10 @@ PlanSegmentExecutionInfo BSPScheduler::generateExecutionInfo(size_t task_id, siz
|
|||
execution_info.source_task_filter.buckets = source_task_buckets[instance];
|
||||
}
|
||||
|
||||
auto source_task_stats = createSourceTaskStats(dag_graph_ptr->getPlanSegmentPtr(task_id), instance, execution_info.source_task_filter);
|
||||
if (!source_task_stats.empty())
|
||||
execution_info.source_task_stats = source_task_stats;
|
||||
|
||||
PlanSegmentInstanceId instance_id = PlanSegmentInstanceId{static_cast<UInt32>(task_id), static_cast<UInt32>(index)};
|
||||
{
|
||||
std::unique_lock<std::mutex> lk(nodes_alloc_mutex);
|
||||
|
|
|
@ -253,6 +253,8 @@ private:
|
|||
void resendResource(const HostWithPorts & host_ports);
|
||||
|
||||
Protos::SendResourceRequestReq fillResourceRequestToProto(const ResourceRequest & req);
|
||||
std::unordered_map<UUID, SourceTaskStat> createSourceTaskStats(
|
||||
PlanSegment * plan_segment_ptr, const SegmentTaskInstance & instance, const SourceTaskFilter & source_task_filter);
|
||||
|
||||
// All batch task will be enqueue first. The schedule logic will pop queue and schedule the poped tasks.
|
||||
EventQueue queue{10000};
|
||||
|
|
|
@ -29,9 +29,11 @@
|
|||
#include <Interpreters/ProcessList.h>
|
||||
#include <Interpreters/ProcessorProfile.h>
|
||||
#include <Interpreters/ProcessorsProfileLog.h>
|
||||
#include <Interpreters/QueryExchangeLog.h>
|
||||
#include <Interpreters/RuntimeFilter/RuntimeFilterManager.h>
|
||||
#include <Interpreters/executeQueryHelper.h>
|
||||
#include <Interpreters/sendPlanSegment.h>
|
||||
#include <MergeTreeCommon/assignCnchParts.h>
|
||||
#include <Optimizer/Signature/PlanSegmentNormalizer.h>
|
||||
#include <Optimizer/Signature/PlanSignature.h>
|
||||
#include <Processors/Exchange/BroadcastExchangeSink.h>
|
||||
|
@ -104,6 +106,7 @@ namespace ErrorCodes
|
|||
void PlanSegmentExecutor::prepareSegmentInfo() const
|
||||
{
|
||||
query_log_element->client_info = context->getClientInfo();
|
||||
query_log_element->txn_id = context->getCurrentTransactionID();
|
||||
query_log_element->segment_id = plan_segment->getPlanSegmentId();
|
||||
query_log_element->segment_parallel = plan_segment->getParallelSize();
|
||||
query_log_element->segment_parallel_index = plan_segment_instance->info.parallel_id;
|
||||
|
@ -158,6 +161,27 @@ PlanSegmentExecutor::~PlanSegmentExecutor() noexcept
|
|||
if (auto query_log = context->getQueryLog())
|
||||
query_log->add(*query_log_element);
|
||||
}
|
||||
if (context->getSettingsRef().log_query_exchange && context->getSettingsRef().bsp_mode)
|
||||
{
|
||||
if (auto query_exchange_log = context->getQueryExchangeLog())
|
||||
{
|
||||
for (const auto & [uuid, stat] : plan_segment_instance->info.source_task_stats)
|
||||
{
|
||||
QueryExchangeLogElement element;
|
||||
element.txn_id = context->getCurrentTransactionID();
|
||||
element.initial_query_id = context->getInitialQueryId();
|
||||
element.parallel_index = plan_segment_instance->info.parallel_id;
|
||||
element.read_segment = plan_segment->getPlanSegmentId();
|
||||
element.event_time
|
||||
= std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
|
||||
element.recv_rows = stat.rows;
|
||||
element.type = fmt::format("table_scan_{}", stat.storage_id.getFullNameNotQuoted());
|
||||
element.finish_code = query_log_element->type == QUERY_FINISH ? BroadcastStatusCode::ALL_SENDERS_DONE
|
||||
: BroadcastStatusCode::RECV_UNKNOWN_ERROR;
|
||||
query_exchange_log->add(element);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
|
|
|
@ -68,6 +68,7 @@ struct PlanSegmentExecutionInfo
|
|||
UInt32 attempt_id = std::numeric_limits<UInt32>::max();
|
||||
std::unordered_map<UInt64, std::vector<PlanSegmentMultiPartitionSource>> sources;
|
||||
UInt32 worker_epoch{0};
|
||||
std::unordered_map<UUID, SourceTaskStat> source_task_stats;
|
||||
};
|
||||
|
||||
struct PlanSegmentInstance
|
||||
|
|
|
@ -24,9 +24,11 @@
|
|||
#include <Interpreters/DistributedStages/PlanSegmentInstance.h>
|
||||
#include <Interpreters/DistributedStages/PlanSegmentManagerRpcService.h>
|
||||
#include <Interpreters/DistributedStages/PlanSegmentReport.h>
|
||||
#include <Interpreters/DistributedStages/SourceTask.h>
|
||||
#include <Interpreters/DistributedStages/executePlanSegment.h>
|
||||
#include <Interpreters/NamedSession.h>
|
||||
#include <Processors/Exchange/DataTrans/Brpc/ReadBufferFromBrpcBuf.h>
|
||||
#include <Protos/RPCHelpers.h>
|
||||
#include <Protos/cnch_worker_rpc.pb.h>
|
||||
#include <Protos/plan_segment_manager.pb.h>
|
||||
#include <brpc/controller.h>
|
||||
|
@ -631,6 +633,9 @@ void PlanSegmentManagerRpcService::submitPlanSegment(
|
|||
}
|
||||
}
|
||||
|
||||
for (const auto & iter : request->source_task_stats())
|
||||
execution_info.source_task_stats.emplace(RPCHelpers::createUUID(iter.storage_id().uuid()), SourceTaskStat::fromProto(iter));
|
||||
|
||||
butil::IOBuf plan_segment_buf;
|
||||
auto plan_segment_buf_size = cntl->request_attachment().cutn(&plan_segment_buf, request->plan_segment_buf_size());
|
||||
if (plan_segment_buf_size != request->plan_segment_buf_size())
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
#include <Interpreters/DistributedStages/SourceTask.h>
|
||||
#include "common/types.h"
|
||||
#include <Protos/RPCHelpers.h>
|
||||
#include <Protos/plan_segment_manager.pb.h>
|
||||
#include <common/types.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
|
@ -40,4 +42,18 @@ void SourceTaskFilter::fromProto(const Protos::SourceTaskFilter & proto)
|
|||
}
|
||||
}
|
||||
|
||||
Protos::SourceTaskStat SourceTaskStat::toProto() const
|
||||
{
|
||||
Protos::SourceTaskStat proto;
|
||||
storage_id.toProto(*proto.mutable_storage_id());
|
||||
proto.set_rows(rows);
|
||||
|
||||
return proto;
|
||||
}
|
||||
|
||||
SourceTaskStat SourceTaskStat::fromProto(const Protos::SourceTaskStat & proto)
|
||||
{
|
||||
return {RPCHelpers::createStorageID(proto.storage_id()), proto.rows()};
|
||||
}
|
||||
|
||||
} // namespace DB
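A minimal round-trip sketch for the new SourceTaskStat helpers, assuming an illustrative StorageID built from placeholder database/table names and a placeholder row count (nothing below is taken from the diff beyond the SourceTaskStat API itself):

#include <cassert>
#include <Interpreters/DistributedStages/SourceTask.h>
#include <Interpreters/StorageID.h>

void sourceTaskStatRoundTripExample()
{
    using namespace DB;
    /// Placeholder table identity and row count, for illustration only.
    SourceTaskStat stat(StorageID("hypothetical_db", "hypothetical_table"), /*rows_*/ 42);
    Protos::SourceTaskStat proto = stat.toProto();              // storage_id and rows copied into the message
    SourceTaskStat restored = SourceTaskStat::fromProto(proto); // rebuilt on the receiving side
    assert(restored.rows == 42);                                // the row count survives the round trip
}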
|
||||
|
|
|
@ -3,6 +3,9 @@
|
|||
#include <map>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <Catalog/DataModelPartWrapper_fwd.h>
|
||||
#include <Core/Types.h>
|
||||
#include <Interpreters/StorageID.h>
|
||||
#include <Protos/plan_segment_manager.pb.h>
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
|
@ -15,6 +18,7 @@ namespace DB
|
|||
struct SourceTaskPayload
|
||||
{
|
||||
std::set<Int64> buckets;
|
||||
ServerDataPartsVector visible_parts;
|
||||
size_t rows = 0;
|
||||
size_t part_num = 0;
|
||||
String toString() const
|
||||
|
@ -27,6 +31,17 @@ struct SourceTaskPayload
|
|||
}
|
||||
};
|
||||
|
||||
struct SourceTaskStat
|
||||
{
|
||||
SourceTaskStat(StorageID storage_id_, size_t rows_) : storage_id(storage_id_), rows(rows_)
|
||||
{
|
||||
}
|
||||
StorageID storage_id;
|
||||
size_t rows;
|
||||
Protos::SourceTaskStat toProto() const;
|
||||
static SourceTaskStat fromProto(const Protos::SourceTaskStat & proto);
|
||||
};
|
||||
|
||||
struct SourceTaskPayloadOnWorker
|
||||
{
|
||||
String worker_id;
|
||||
|
|
|
@ -219,6 +219,9 @@ void executePlanSegmentRemotelyWithPreparedBuf(
|
|||
}
|
||||
}
|
||||
|
||||
for (const auto & iter : execution_info.source_task_stats)
|
||||
*request.add_source_task_stats() = iter.second.toProto();
|
||||
|
||||
if (execution_info.worker_epoch > 0)
|
||||
request.set_worker_epoch(execution_info.worker_epoch);
|
||||
|
||||
|
|
|
@ -37,9 +37,10 @@
|
|||
#include <DataTypes/DataTypeString.h>
|
||||
#include <Functions/FunctionsConversion.h>
|
||||
|
||||
#include <Interpreters/Aggregator.h>
|
||||
#include <Interpreters/ArrayJoinAction.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/ConcurrentHashJoin.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/DictionaryReader.h>
|
||||
#include <Interpreters/ExpressionActions.h>
|
||||
#include <Interpreters/ExpressionAnalyzer.h>
|
||||
|
@ -54,6 +55,7 @@
|
|||
#include <Interpreters/evaluateConstantExpression.h>
|
||||
#include <Interpreters/replaceForPositionalArguments.h>
|
||||
|
||||
#include <QueryPlan/AggregatingStep.h>
|
||||
#include <QueryPlan/ExpressionStep.h>
|
||||
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
|
@ -2110,7 +2112,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
|
|||
bool finalized = false;
|
||||
size_t where_step_num = 0;
|
||||
|
||||
auto finalize_chain = [&](ExpressionActionsChain & chain) {
|
||||
auto finalize_chain = [&](ExpressionActionsChain & chain) -> ColumnsWithTypeAndName {
|
||||
chain.finalize();
|
||||
|
||||
if (!finalized)
|
||||
|
@ -2119,9 +2121,9 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
|
|||
finalized = true;
|
||||
}
|
||||
|
||||
// fmt::print("After finallize ---------- \n{}\n", chain.dumpChain());
|
||||
|
||||
auto res = chain.getLastStep().getResultColumns();
|
||||
chain.clear();
|
||||
return res;
|
||||
};
|
||||
|
||||
if (storage)
|
||||
|
@ -2270,7 +2272,56 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
|
|||
query_analyzer.appendAggregateFunctionsArguments(chain, only_types || !first_stage);
|
||||
before_aggregation = chain.getLastActions();
|
||||
|
||||
finalize_chain(chain);
|
||||
auto columns_before_aggregation = finalize_chain(chain);
|
||||
|
||||
/// Here we want to check that columns after aggregation have the same type as
|
||||
/// were promised in query_analyzer.aggregated_columns
|
||||
/// Ideally, they should be equal. In practice, this may not be true.
|
||||
/// As an example, we don't build sets for IN inside ExpressionAnalysis::analyzeAggregation,
|
||||
/// so that constant folding for expression (1 in 1) will not work. This may change the return type
|
||||
/// for functions with LowCardinality argument: function "substr(toLowCardinality('abc'), 1 IN 1)"
|
||||
/// should usually return LowCardinality(String) when (1 IN 1) is constant, but without built set
|
||||
/// for (1 IN 1) constant is not propagated and "substr" returns String type.
|
||||
/// See 02503_in_lc_const_args_bug.sql
|
||||
///
|
||||
/// As a temporary solution, we add converting actions to the next chain.
|
||||
/// Hopefully, later we can
|
||||
/// * use a new analyzer where this issue is absent
|
||||
/// * or remove ExpressionActionsChain completely and re-implement its logic on top of the query plan
|
||||
{
|
||||
for (auto & col : columns_before_aggregation)
|
||||
if (!col.column)
|
||||
col.column = col.type->createColumn();
|
||||
|
||||
Block header_before_aggregation(std::move(columns_before_aggregation));
|
||||
|
||||
auto names = query_analyzer.aggregationKeys().getNames();
|
||||
ColumnNumbers keys;
|
||||
for (const auto & name : names)
|
||||
keys.push_back(header_before_aggregation.getPositionByName(name));
|
||||
const auto & aggregates = query_analyzer.aggregates();
|
||||
|
||||
bool has_grouping = query_analyzer.group_by_kind != GroupByKind::ORDINARY;
|
||||
auto actual_header = Aggregator::Params::getHeader(header_before_aggregation, {}, keys, aggregates, /*final*/ true);
|
||||
actual_header = AggregatingStep::appendGroupingColumn(std::move(actual_header), has_grouping);
|
||||
|
||||
Block expected_header;
|
||||
for (const auto & expected : query_analyzer.aggregated_columns)
|
||||
expected_header.insert(ColumnWithTypeAndName(expected.type, expected.name));
|
||||
|
||||
if (!blocksHaveEqualStructure(actual_header, expected_header))
|
||||
{
|
||||
auto converting = ActionsDAG::makeConvertingActions(
|
||||
actual_header.getColumnsWithTypeAndName(),
|
||||
expected_header.getColumnsWithTypeAndName(),
|
||||
ActionsDAG::MatchColumnsMode::Name,
|
||||
true);
|
||||
|
||||
auto & step = chain.lastStep(query_analyzer.aggregated_columns);
|
||||
auto & actions = step.actions();
|
||||
actions = ActionsDAG::merge(std::move(*actions), std::move(*converting));
|
||||
}
|
||||
}
|
||||
|
||||
if (query_analyzer.appendHaving(chain, only_types || !second_stage))
|
||||
{
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
#include <Common/escapeForFileName.h>
|
||||
#include <Common/ShellCommand.h>
|
||||
#include <MergeTreeCommon/CnchServerTopology.h>
|
||||
#include <MergeTreeCommon/CnchServerManager.h>
|
||||
#include <MergeTreeCommon/CnchServerLeader.h>
|
||||
#include <MergeTreeCommon/CnchTopologyMaster.h>
|
||||
#include <MergeTreeCommon/GlobalGCManager.h>
|
||||
#include <CloudServices/CnchBGThreadCommon.h>
|
||||
|
@ -1319,9 +1319,6 @@ void InterpreterSystemQuery::executeDedup(const ASTSystemQuery & query)
|
|||
void InterpreterSystemQuery::dumpCnchServerStatus()
|
||||
{
|
||||
auto context = getContext();
|
||||
auto server_manager = context->getCnchServerManager();
|
||||
if (server_manager)
|
||||
server_manager->dumpServerStatus();
|
||||
auto topology_master = context->getCnchTopologyMaster();
|
||||
if (topology_master)
|
||||
topology_master->dumpStatus();
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#pragma once
|
||||
#include <vector>
|
||||
#include <llvm/llvm/include/llvm/IR/Module.h>
|
||||
#include <llvm-project/llvm/include/llvm/IR/Module.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
|
|
@ -15,13 +15,20 @@
|
|||
|
||||
#include <Interpreters/NamedSession.h>
|
||||
|
||||
#include <Common/setThreadName.h>
|
||||
#include <CloudServices/CnchWorkerResource.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Transaction/TxnTimestamp.h>
|
||||
#include <CloudServices/CnchWorkerResource.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/setThreadName.h>
|
||||
|
||||
#include <chrono>
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric ActiveCnchSession;
|
||||
extern const Metric ActiveHttpSession;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
|
@ -58,25 +65,26 @@ std::shared_ptr<NamedSession> NamedSessionsImpl<NamedSession>::acquireSession(
|
|||
bool throw_if_not_found,
|
||||
bool return_null_if_not_found)
|
||||
{
|
||||
Poco::Timestamp current_time;
|
||||
std::unique_lock lock(mutex);
|
||||
|
||||
auto it = sessions.find(session_id);
|
||||
if (it == sessions.end())
|
||||
auto & sessions_by_key = sessions.template get<1>();
|
||||
auto it = sessions_by_key.find(session_id);
|
||||
bool session_exist = true;
|
||||
if (it == sessions_by_key.end())
|
||||
{
|
||||
if (return_null_if_not_found)
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (throw_if_not_found)
|
||||
throw Exception("Session not found.", ErrorCodes::SESSION_NOT_FOUND);
|
||||
it = sessions.insert(std::make_pair(session_id, std::make_shared<NamedSession>(session_id, context, timeout, *this))).first;
|
||||
|
||||
session_exist = false;
|
||||
it = sessions_by_key.insert(std::make_shared<NamedSession>(session_id, context, timeout, *this)).first;
|
||||
}
|
||||
|
||||
/// Use existing session.
|
||||
const auto & session = it->second;
|
||||
|
||||
scheduleCloseSession(*session, lock);
|
||||
const auto & session = *it;
|
||||
|
||||
/// For cnch, it's ok for a session to not be unique, e.g. in a union query the sub-queries will have the same transaction id,
/// therefore they share the same session on the worker.
|
||||
|
@ -84,118 +92,116 @@ std::shared_ptr<NamedSession> NamedSessionsImpl<NamedSession>::acquireSession(
|
|||
{
|
||||
if (!session.unique())
|
||||
throw Exception("Session is locked by a concurrent client.", ErrorCodes::SESSION_IS_LOCKED);
|
||||
|
||||
// If the http session is entered again, try to update its timeout
|
||||
if (session_exist)
|
||||
{
|
||||
size_t new_close_time = current_time.epochTime() + timeout;
|
||||
if (session->close_time < new_close_time)
|
||||
sessions.template get<1>().modify(it, [&new_close_time](auto & temp) { temp->close_time = new_close_time; });
|
||||
}
|
||||
}
|
||||
|
||||
return session;
|
||||
}
|
||||
|
||||
template<typename NamedSession>
|
||||
std::vector<std::pair<typename NamedSession::NamedSessionKey, std::shared_ptr<CnchWorkerResource>>> NamedSessionsImpl<NamedSession>::getAllWorkerResources() const
|
||||
template <typename NamedSession>
|
||||
std::vector<std::pair<typename NamedSession::NamedSessionKey, std::shared_ptr<CnchWorkerResource>>>
|
||||
NamedSessionsImpl<NamedSession>::getAllWorkerResources() const
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
std::vector<std::pair<Key, CnchWorkerResourcePtr>> res;
|
||||
for (const auto & [key, session]: sessions)
|
||||
std::lock_guard lock(mutex);
|
||||
for (const auto & session : sessions)
|
||||
{
|
||||
if (auto resource = session->context->getCnchWorkerResource())
|
||||
res.emplace_back(key, resource);
|
||||
res.emplace_back(session->key, resource);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
template<typename NamedSession>
|
||||
void NamedSessionsImpl<NamedSession>::scheduleCloseSession(NamedSession & session, std::unique_lock<std::mutex> &)
|
||||
{
|
||||
/// Push it on a queue of sessions to close, on a position corresponding to the timeout.
|
||||
/// (timeout is measured from current moment of time)
|
||||
|
||||
Poco::Timestamp current_time;
|
||||
if (session.close_time < current_time.epochTime() + session.timeout)
|
||||
{
|
||||
session.close_time = current_time.epochTime() + session.timeout;
|
||||
close_times.emplace(session.close_time, session.key);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename NamedSession>
|
||||
void NamedSessionsImpl<NamedSession>::cleanThread()
|
||||
{
|
||||
setThreadName("SessionCleaner");
|
||||
std::unique_lock lock{mutex};
|
||||
|
||||
|
||||
while (true)
|
||||
{
|
||||
auto interval = closeSessions(lock);
|
||||
auto interval = closeSessions();
|
||||
|
||||
std::unique_lock lock{mutex};
|
||||
if (cond.wait_for(lock, interval, [this]() -> bool { return quit; }))
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename NamedSession>
|
||||
std::chrono::steady_clock::duration NamedSessionsImpl<NamedSession>::closeSessions(std::unique_lock<std::mutex> & lock)
|
||||
template <typename NamedSession>
|
||||
std::chrono::steady_clock::duration NamedSessionsImpl<NamedSession>::closeSessions()
|
||||
{
|
||||
/// Schedule closeSessions() every 1 second by default.
|
||||
static constexpr std::chrono::steady_clock::duration close_interval = std::chrono::seconds(1);
|
||||
auto log = getLogger("NamedSession");
|
||||
|
||||
if (close_times.empty())
|
||||
return close_interval;
|
||||
|
||||
static constexpr std::chrono::steady_clock::duration close_interval = std::chrono::seconds(10);
|
||||
static constexpr size_t max_batch_clean_size = 100;
|
||||
Poco::Timestamp current_time;
|
||||
std::vector<String> released_sessions;
|
||||
|
||||
while (!close_times.empty())
|
||||
{
|
||||
auto curr_session = *close_times.begin();
|
||||
std::unique_lock lock{mutex};
|
||||
auto & sessions_by_close = sessions.template get<0>();
|
||||
auto session_iter = sessions_by_close.begin();
|
||||
|
||||
if (curr_session.first > static_cast<size_t>(current_time.epochTime()))
|
||||
break;
|
||||
|
||||
auto session_iter = sessions.find(curr_session.second);
|
||||
|
||||
if (session_iter != sessions.end() && session_iter->second->close_time == curr_session.first)
|
||||
while (session_iter != sessions_by_close.end()
&& (*session_iter)->close_time <= static_cast<size_t>(current_time.epochTime())
&& released_sessions.size() < max_batch_clean_size)
|
||||
{
|
||||
if (!session_iter->second.unique())
|
||||
if ((*session_iter).unique())
|
||||
{
|
||||
/// Skip to recycle and move it to close on the next interval.
|
||||
session_iter->second->timeout = 1;
|
||||
scheduleCloseSession(*session_iter->second, lock);
|
||||
released_sessions.emplace_back((*session_iter)->getID());
|
||||
session_iter = sessions_by_close.erase(session_iter);
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_DEBUG(log, "Release timed out session: {}", session_iter->second->getID());
|
||||
sessions.erase(session_iter);
|
||||
}
|
||||
session_iter++;
|
||||
}
|
||||
|
||||
close_times.erase(close_times.begin());
|
||||
}
|
||||
|
||||
for (auto & session_id : released_sessions)
|
||||
LOG_INFO(getLogger("NamedSessionImpl"), "release timed out session: {}", session_id);
|
||||
|
||||
return close_interval;
|
||||
}
|
||||
|
||||
|
||||
NamedSession::NamedSession(NamedSessionKey key_, ContextPtr context_, size_t timeout_, NamedSessions & parent_)
|
||||
: key(key_), context(Context::createCopy(context_)), timeout(timeout_), parent(parent_)
|
||||
{
|
||||
CurrentMetrics::add(CurrentMetrics::ActiveHttpSession);
|
||||
close_time = Poco::Timestamp().epochTime() + timeout;
|
||||
}
|
||||
|
||||
NamedSession::~NamedSession()
|
||||
{
|
||||
CurrentMetrics::sub(CurrentMetrics::ActiveHttpSession);
|
||||
}
|
||||
|
||||
void NamedSession::release()
|
||||
{
|
||||
parent.releaseSession(*this);
|
||||
parent.tryUpdateSessionCloseTime(*this, Poco::Timestamp().epochTime() + timeout);
|
||||
}
|
||||
|
||||
NamedCnchSession::NamedCnchSession(NamedSessionKey key_, ContextPtr context_, size_t timeout_, NamedCnchSessions & parent_)
|
||||
: key(key_), context(Context::createCopy(context_)), timeout(timeout_), parent(parent_)
|
||||
{
|
||||
CurrentMetrics::add(CurrentMetrics::ActiveCnchSession);
|
||||
context->worker_resource = std::make_shared<CnchWorkerResource>();
|
||||
close_time = Poco::Timestamp().epochTime() + timeout;
|
||||
}
|
||||
|
||||
NamedCnchSession::~NamedCnchSession()
|
||||
{
|
||||
CurrentMetrics::sub(CurrentMetrics::ActiveCnchSession);
|
||||
}
|
||||
|
||||
void NamedCnchSession::release()
|
||||
{
|
||||
timeout = 0; /// schedule immediately
|
||||
close_time = 0;
|
||||
parent.releaseSession(*this);
|
||||
LOG_DEBUG(getLogger("NamedCnchSession"), "Release CnchWorkerResource {}", key);
|
||||
LOG_DEBUG(getLogger("NamedCnchSession"), "release CnchWorkerResource({})", key);
|
||||
}
|
||||
|
||||
void NamedCnchSession::registerPlanSegmentsCount(size_t _plan_segments_count)
|
||||
|
|
|
@ -15,9 +15,13 @@
|
|||
|
||||
#pragma once
|
||||
#include <Core/Types.h>
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <boost/multi_index/hashed_index.hpp>
|
||||
#include <boost/multi_index/member.hpp>
|
||||
#include <boost/multi_index/ordered_index.hpp>
|
||||
#include <boost/multi_index_container.hpp>
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/ThreadPool.h>
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
|
@ -33,38 +37,57 @@ class NamedSessionsImpl
|
|||
{
|
||||
public:
|
||||
using Key = typename NamedSession::NamedSessionKey;
|
||||
using SessionKeyHash = typename NamedSession::SessionKeyHash;
|
||||
using SessionPtr = std::shared_ptr<NamedSession>;
|
||||
// sessions can be indexed by either close_time or txn_id:
// we sort sessions by close_time to release timed-out sessions in order, and use txn_id to release a specific session
|
||||
using SessionContainer = boost::multi_index::multi_index_container<
|
||||
SessionPtr,
|
||||
boost::multi_index::indexed_by<
|
||||
boost::multi_index::ordered_non_unique<boost::multi_index::member<NamedSession, size_t, &NamedSession::close_time>>,
|
||||
boost::multi_index::hashed_unique<boost::multi_index::member<NamedSession, Key, &NamedSession::key>>>>;
|
||||
|
||||
~NamedSessionsImpl();
|
||||
|
||||
/// Find existing session or create a new.
|
||||
std::shared_ptr<NamedSession> acquireSession(
|
||||
const Key & session_id,
|
||||
ContextPtr context,
|
||||
size_t timeout,
|
||||
bool throw_if_not_found,
|
||||
bool return_null_if_not_found = false);
|
||||
const Key & session_id, ContextPtr context, size_t timeout, bool throw_if_not_found, bool return_null_if_not_found = false);
|
||||
|
||||
void releaseSession(NamedSession & session)
|
||||
{
|
||||
LOG_DEBUG(getLogger("NamedSessionImpl"), "release finished session: {}", session.getID());
|
||||
std::unique_lock lock(mutex);
|
||||
scheduleCloseSession(session, lock);
|
||||
auto & sessions_by_key = sessions.template get<1>();
|
||||
sessions_by_key.erase(session.key);
|
||||
}
|
||||
|
||||
void tryUpdateSessionCloseTime(NamedSession & session, size_t new_close_time)
|
||||
{
|
||||
if (session.close_time < new_close_time)
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
auto & sessions_by_key = sessions.template get<1>();
|
||||
sessions_by_key.modify(
|
||||
sessions_by_key.find(session.key), [&new_close_time](auto & temp) { temp->close_time = new_close_time; });
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::pair<Key, std::shared_ptr<CnchWorkerResource>>> getAllWorkerResources() const;
|
||||
|
||||
private:
|
||||
using Container = std::unordered_map<Key, std::shared_ptr<NamedSession>, SessionKeyHash>;
|
||||
using CloseTimes = std::multimap<size_t, Key>;
|
||||
Container sessions;
|
||||
CloseTimes close_times;
|
||||
// Used only for test
|
||||
size_t getCurrentActiveSession() const
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
return sessions.size();
|
||||
}
|
||||
|
||||
void scheduleCloseSession(NamedSession & session, std::unique_lock<std::mutex> &);
|
||||
|
||||
private:
|
||||
SessionContainer sessions;
|
||||
|
||||
void cleanThread();
|
||||
|
||||
/// Close sessions, that has been expired. Returns how long to wait for next session to be expired, if no new sessions will be added.
|
||||
std::chrono::steady_clock::duration closeSessions(std::unique_lock<std::mutex> & lock);
|
||||
/// Close sessions that have expired. ATTENTION: you need to hold a lock before calling this method.
|
||||
std::chrono::steady_clock::duration closeSessions();
|
||||
|
||||
mutable std::mutex mutex;
|
||||
std::condition_variable cond;
|
||||
|
@ -82,29 +105,25 @@ using NamedCnchSessions = NamedSessionsImpl<NamedCnchSession>;
|
|||
struct NamedSession
|
||||
{
|
||||
/// User name and session identifier. Named sessions are local to users.
|
||||
using NamedSessionKey = std::pair<String, String>;
|
||||
struct NamedSessionKey
|
||||
{
|
||||
String session_id;
|
||||
String user;
|
||||
|
||||
bool operator==(const NamedSessionKey & other) const { return session_id == other.session_id && user == other.user; }
|
||||
};
|
||||
|
||||
NamedSessionKey key;
|
||||
ContextMutablePtr context;
|
||||
size_t timeout;
|
||||
size_t timeout{0};
|
||||
size_t close_time{0};
|
||||
NamedSessionsImpl<NamedSession> & parent;
|
||||
|
||||
NamedSession(NamedSessionKey key_, ContextPtr context_, size_t timeout_, NamedSessions & parent_);
|
||||
~NamedSession();
|
||||
void release();
|
||||
|
||||
String getID() const { return key.first + "-" + key.second; }
|
||||
|
||||
class SessionKeyHash
|
||||
{
|
||||
public:
|
||||
size_t operator()(const NamedSessionKey & session_key) const
|
||||
{
|
||||
SipHash hash;
|
||||
hash.update(session_key.first);
|
||||
hash.update(session_key.second);
|
||||
return hash.get64();
|
||||
}
|
||||
};
|
||||
String getID() const { return key.session_id + "-" + key.user; }
|
||||
};
|
||||
|
||||
struct NamedCnchSession
|
||||
|
@ -114,11 +133,12 @@ struct NamedCnchSession
|
|||
|
||||
NamedSessionKey key;
|
||||
ContextMutablePtr context;
|
||||
size_t timeout;
|
||||
size_t timeout{0};
|
||||
size_t close_time{0};
|
||||
NamedSessionsImpl<NamedCnchSession> & parent;
|
||||
|
||||
NamedCnchSession(NamedSessionKey key_, ContextPtr context_, size_t timeout_, NamedCnchSessions & parent_);
|
||||
~NamedCnchSession();
|
||||
void release();
|
||||
|
||||
std::optional<std::atomic_size_t> plan_segments_count;
|
||||
|
@ -128,4 +148,12 @@ struct NamedCnchSession
|
|||
String getID() const { return std::to_string(key); }
|
||||
};
|
||||
|
||||
inline std::size_t hash_value(const NamedSession::NamedSessionKey & session_key)
|
||||
{
|
||||
SipHash hash;
|
||||
hash.update(session_key.session_id);
|
||||
hash.update(session_key.user);
|
||||
return hash.get64();
|
||||
}
|
||||
|
||||
}
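One detail behind the new container: the hashed_unique index in NamedSessionsImpl names no hasher, so Boost defaults to boost::hash<NamedSessionKey>, which finds the free hash_value() above via argument-dependent lookup. A small sketch of that mechanism, with illustrative key values:

#include <cstddef>
#include <boost/functional/hash.hpp>
#include <Interpreters/NamedSession.h>

std::size_t hashExampleSessionKey()
{
    /// Illustrative session id and user; any values work the same way.
    DB::NamedSession::NamedSessionKey key{"illustrative-session-id", "illustrative-user"};
    // boost::hash delegates to the ADL-visible hash_value(key) defined in NamedSession.h,
    // which is exactly what the hashed_unique index of SessionContainer relies on.
    return boost::hash<DB::NamedSession::NamedSessionKey>()(key);
}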
|
||||
|
|
|
@ -516,6 +516,7 @@ NodeSelectorResult SourceNodeSelector::select(PlanSegment * plan_segment_ptr, Co
|
|||
|
||||
is_bucket_valid = is_bucket_valid && hasBucketScan(*plan_segment_ptr);
|
||||
|
||||
/// table => (worker => payload)
|
||||
const auto & source_task_payload_map = query_context->getCnchServerResource()->getSourceTaskPayload();
|
||||
for (const auto & plan_segment_input : plan_segment_ptr->getPlanSegmentInputs())
|
||||
{
|
||||
|
@ -530,7 +531,15 @@ NodeSelectorResult SourceNodeSelector::select(PlanSegment * plan_segment_ptr, Co
|
|||
rows_count += p.rows;
|
||||
auto & worker_payload = payload_on_workers[addr];
|
||||
worker_payload.rows += p.rows;
|
||||
worker_payload.part_num += 1;
|
||||
worker_payload.part_num += p.part_num;
|
||||
LOG_TRACE(
|
||||
log,
|
||||
"Payload on Worker({}) is rows:{} part_num:{} visible_part size:{} buckets size:{}",
|
||||
addr.toShortString(),
|
||||
p.rows,
|
||||
p.part_num,
|
||||
p.visible_parts.size(),
|
||||
p.buckets.size());
|
||||
if (is_bucket_valid)
|
||||
{
|
||||
for (auto bucket : p.buckets)
|
||||
|
|
|
@ -107,6 +107,7 @@ struct NodeSelectorResult
|
|||
std::vector<size_t> indexes;
|
||||
std::unordered_map<AddressInfo, size_t, AddressInfo::Hash> source_task_count_on_workers;
|
||||
std::unordered_map<AddressInfo, std::vector<std::set<Int64>>, AddressInfo::Hash> buckets_on_workers;
|
||||
std::unordered_map<AddressInfo, std::unordered_map<UUID, SourceTaskStat>, AddressInfo::Hash> worker_source_task_stats;
|
||||
|
||||
//input plansegment id => source address and partition ids, ordered by parallel index, used by bsp mode
|
||||
std::map<PlanSegmentInstanceId, std::vector<PlanSegmentMultiPartitionSource>> sources;
|
||||
|
|
|
@ -29,6 +29,7 @@ namespace DB
|
|||
NamesAndTypesList QueryExchangeLogElement::getNamesAndTypes()
|
||||
{
|
||||
return {
|
||||
{"txn_id", std::make_shared<DataTypeUInt64>()},
|
||||
{"initial_query_id", std::make_shared<DataTypeString>()},
|
||||
{"event_date", std::make_shared<DataTypeDate>()},
|
||||
{"event_time", std::make_shared<DataTypeDateTime>()},
|
||||
|
@ -87,6 +88,7 @@ NamesAndAliases QueryExchangeLogElement::getNamesAndAliases()
|
|||
void QueryExchangeLogElement::appendToBlock(MutableColumns & columns) const
|
||||
{
|
||||
size_t i = 0;
|
||||
columns[i++]->insert(txn_id);
|
||||
columns[i++]->insert(initial_query_id);
|
||||
columns[i++]->insert(DateLUT::serverTimezoneInstance().toDayNum(event_time).toUnderType());
|
||||
columns[i++]->insert(event_time);
|
||||
|
|
|
@ -15,8 +15,9 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/SystemLog.h>
|
||||
#include <limits>
|
||||
#include <Interpreters/ClientInfo.h>
|
||||
#include <Interpreters/SystemLog.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
|
@ -27,6 +28,7 @@ namespace DB
|
|||
{
|
||||
struct QueryExchangeLogElement
|
||||
{
|
||||
UInt64 txn_id{std::numeric_limits<UInt64>::max()};
|
||||
String initial_query_id{"-1"};
|
||||
UInt64 exchange_id{std::numeric_limits<UInt64>::max()};
|
||||
UInt64 partition_id{std::numeric_limits<UInt64>::max()};
|
||||
|
|
|
@ -147,7 +147,8 @@ NamesAndTypesList QueryLogElement::getNamesAndTypes()
|
|||
{"virtual_warehouse", std::make_shared<DataTypeString>()},
|
||||
{"worker_group", std::make_shared<DataTypeString>()},
|
||||
{"query_plan", std::make_shared<DataTypeString>()},
|
||||
{"normalized_query_plan_hash", std::make_shared<DataTypeUInt64>()}};
|
||||
{"normalized_query_plan_hash", std::make_shared<DataTypeUInt64>()},
|
||||
{"txn_id", std::make_shared<DataTypeUInt64>()}};
|
||||
}
|
||||
|
||||
NamesAndAliases QueryLogElement::getNamesAndAliases()
|
||||
|
@ -350,6 +351,7 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const
|
|||
columns[i++]->insert(worker_group);
|
||||
columns[i++]->insert(query_plan);
|
||||
columns[i++]->insert(normalized_query_plan_hash);
|
||||
columns[i++]->insert(txn_id);
|
||||
}
|
||||
|
||||
void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableColumns & columns, size_t & i)
|
||||
|
|
|
@ -106,6 +106,7 @@ struct QueryLogElement
|
|||
String worker_group;
|
||||
String query_plan;
|
||||
UInt64 normalized_query_plan_hash{};
|
||||
UInt64 txn_id{};
|
||||
|
||||
static std::string name() { return "QueryLog"; }
|
||||
|
||||
|
|
|
@ -119,7 +119,8 @@ WorkerGroupHandleImpl::WorkerGroupHandleImpl(
|
|||
default_database, user_password.first, user_password.second,
|
||||
/*cluster_*/"",/*cluster_secret_*/"",
|
||||
"server", address.compression, address.secure, 1,
|
||||
host.exchange_port, host.exchange_status_port, host.rpc_port, host.id);
|
||||
/// `exchange_port` is the same as `rpc_port`.
|
||||
host.rpc_port, host.rpc_port, host.rpc_port, host.id);
|
||||
|
||||
info.pool = std::make_shared<ConnectionPoolWithFailover>(
|
||||
ConnectionPoolPtrs{pool}, settings.load_balancing, settings.connections_with_failover_max_tries);
|
||||
|
|
|
@ -635,6 +635,7 @@ static void onExceptionBeforeStart(
|
|||
bool throw_root_cause = needThrowRootCauseError(context.get(), elem.exception_code, elem.exception);
|
||||
|
||||
elem.client_info = context->getClientInfo();
|
||||
elem.txn_id = context->getCurrentTransactionID();
|
||||
elem.partition_ids = context->getPartitionIds();
|
||||
|
||||
elem.log_comment = settings.log_comment;
|
||||
|
@ -1542,6 +1543,8 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
|
|||
|
||||
elem.client_info = client_info;
|
||||
elem.partition_ids = context->getPartitionIds();
|
||||
if (txn)
|
||||
elem.txn_id = context->getCurrentTransactionID();
|
||||
|
||||
|
||||
if (auto worker_group = context->tryGetCurrentWorkerGroup())
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/NamedSession.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <Common/tests/gtest_global_context.h>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
TEST(NamedSessionsTest, AcquireAndReleaseSession)
|
||||
{
|
||||
NamedCnchSessions sessions;
|
||||
|
||||
auto context = Context::createCopy(getContext().context);
|
||||
auto session = sessions.acquireSession(1, context, 10, false, false);
|
||||
// 2 = shared_ptr of current session + that in sessions
|
||||
ASSERT_EQ(session.use_count(), 2);
|
||||
auto session2 = sessions.acquireSession(2, context, 30, false, false);
|
||||
auto session3 = sessions.acquireSession(3, context, 20, false, false);
|
||||
ASSERT_EQ(sessions.getCurrentActiveSession(), 3);
|
||||
|
||||
session->release();
|
||||
ASSERT_EQ(sessions.getCurrentActiveSession(), 2);
|
||||
session2->release();
|
||||
session3->release();
|
||||
ASSERT_EQ(sessions.getCurrentActiveSession(), 0);
|
||||
}
|
||||
|
||||
TEST(NamedSessionsTest, SessionContainerTest)
|
||||
{
|
||||
NamedCnchSessions parent;
|
||||
NamedCnchSessions::SessionContainer sessions;
|
||||
|
||||
auto context = Context::createCopy(getContext().context);
|
||||
sessions.insert(std::make_shared<NamedCnchSession>(1, context, 10, parent));
|
||||
sessions.insert(std::make_shared<NamedCnchSession>(2, context, 30, parent));
|
||||
sessions.insert(std::make_shared<NamedCnchSession>(3, context, 20, parent));
|
||||
ASSERT_EQ(sessions.size(), 3);
|
||||
|
||||
// verify traversal ordered by close_time
|
||||
auto & sessions_by_close = sessions.template get<0>();
|
||||
auto session_iter = sessions_by_close.begin();
|
||||
ASSERT_EQ((*session_iter)->timeout, 10);
|
||||
session_iter++;
|
||||
ASSERT_EQ((*session_iter)->timeout, 20);
|
||||
// verify erase by iterator and the iterator position returned by erase()
|
||||
session_iter = sessions_by_close.erase(session_iter);
|
||||
ASSERT_EQ((*session_iter)->timeout, 30);
|
||||
session_iter++;
|
||||
ASSERT_TRUE(session_iter == sessions_by_close.end());
|
||||
ASSERT_EQ(sessions.size(), 2);
|
||||
|
||||
// verify find by session key
|
||||
auto & sessions_by_key = sessions.template get<1>();
|
||||
auto session_iter_2 = sessions_by_key.find(1);
|
||||
ASSERT_EQ((*session_iter_2)->timeout, 10);
|
||||
|
||||
// verify erase by session key
|
||||
sessions_by_key.erase(1);
|
||||
ASSERT_EQ(sessions.size(), 1);
|
||||
}
|
||||
|
||||
TEST(NamedSessionsTest, SessionContainerUpdateTest)
|
||||
{
|
||||
NamedCnchSessions parent;
|
||||
NamedCnchSessions::SessionContainer sessions;
|
||||
|
||||
auto context = Context::createCopy(getContext().context);
|
||||
sessions.insert(std::make_shared<NamedCnchSession>(1, context, 10, parent));
|
||||
sessions.insert(std::make_shared<NamedCnchSession>(2, context, 30, parent));
|
||||
sessions.insert(std::make_shared<NamedCnchSession>(3, context, 20, parent));
|
||||
|
||||
// verify the sequence after updating close time
|
||||
size_t timeout = 100;
|
||||
auto & sessions_by_key = sessions.template get<1>();
|
||||
sessions_by_key.modify(sessions_by_key.find(1), [&timeout](auto & local_session) {
|
||||
local_session->timeout = timeout;
|
||||
local_session->close_time = Poco::Timestamp().epochTime() + timeout;
|
||||
});
|
||||
|
||||
timeout = 50;
|
||||
sessions_by_key.modify(sessions_by_key.find(3), [&timeout](auto & local_session) {
|
||||
local_session->timeout = timeout;
|
||||
local_session->close_time = Poco::Timestamp().epochTime() + timeout;
|
||||
});
|
||||
|
||||
auto & sessions_by_close = sessions.template get<0>();
|
||||
auto session_iter = sessions_by_close.begin();
|
||||
ASSERT_EQ((*session_iter)->timeout, 30);
|
||||
session_iter++;
|
||||
ASSERT_EQ((*session_iter)->timeout, 50);
|
||||
session_iter++;
|
||||
ASSERT_EQ((*session_iter)->timeout, 100);
|
||||
session_iter++;
|
||||
ASSERT_TRUE(session_iter == sessions_by_close.end());
|
||||
}
|
|
@ -0,0 +1,191 @@
|
|||
#include <MergeTreeCommon/CnchServerLeader.h>
|
||||
|
||||
#include <Common/Exception.h>
|
||||
#include <Catalog/Catalog.h>
|
||||
#include <Storages/PartCacheManager.h>
|
||||
#include <ServiceDiscovery/IServiceDiscovery.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
CnchServerLeader::CnchServerLeader(ContextPtr context_) : WithContext(context_)
|
||||
{
|
||||
auto task_func = [this] (String task_name, std::function<bool ()> func, UInt64 interval, std::atomic<UInt64> & last_time, BackgroundSchedulePool::TaskHolder & task)
|
||||
{
|
||||
/// Check schedule delay
|
||||
UInt64 start_time = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
|
||||
if (last_time && start_time > last_time && (start_time - last_time) > (1000 + interval))
|
||||
LOG_WARNING(log, "{} schedules over {}ms. Last finish time: {}, current time: {}", task_name, (1000 + interval), last_time, start_time);
|
||||
|
||||
bool success = false;
|
||||
try
|
||||
{
|
||||
success = func();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, __PRETTY_FUNCTION__);
|
||||
}
|
||||
/// Check execution time
|
||||
UInt64 finish_time = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
|
||||
if (finish_time > start_time && finish_time - start_time > 1000)
|
||||
LOG_WARNING(log, "{} executed over 1000ms. Start time: {}, current time: {}", task_name, start_time, finish_time);
|
||||
|
||||
last_time = finish_time;
|
||||
auto schedule_delay = success ? interval
|
||||
: getContext()->getSettingsRef().topology_retry_interval_ms.totalMilliseconds();
|
||||
task->scheduleAfter(schedule_delay);
|
||||
};
|
||||
|
||||
async_query_status_check_task = getContext()->getTopologySchedulePool().createTask("AsyncQueryStatusChecker", [this, task_func](){
|
||||
UInt64 async_query_status_check_interval = getContext()->getRootConfig().async_query_status_check_period * 1000;
|
||||
task_func("AsyncQueryStatusChecker"
|
||||
, [this]() -> bool { return checkAsyncQueryStatus(); }
|
||||
, async_query_status_check_interval
|
||||
, async_query_status_check_time
|
||||
, async_query_status_check_task);
|
||||
});
|
||||
|
||||
/// For leader election
|
||||
const auto & conf = getContext()->getRootConfig();
|
||||
auto refresh_interval_ms = conf.service_discovery_kv.server_leader_refresh_interval_ms.value;
|
||||
auto expired_interval_ms = conf.service_discovery_kv.server_leader_expired_interval_ms.value;
|
||||
auto prefix = conf.service_discovery_kv.election_prefix.value;
|
||||
auto election_path = prefix + conf.service_discovery_kv.server_leader_host_path.value;
|
||||
auto host = getContext()->getHostWithPorts();
|
||||
auto metastore_ptr = getContext()->getCnchCatalog()->getMetastore();
|
||||
|
||||
elector = std::make_shared<StorageElector>(
|
||||
std::make_shared<ServerManagerKvStorage>(metastore_ptr),
|
||||
refresh_interval_ms,
|
||||
expired_interval_ms,
|
||||
host,
|
||||
election_path,
|
||||
[&](const HostWithPorts *) { return onLeader(); },
|
||||
[&](const HostWithPorts *) { return onFollower(); }
|
||||
);
|
||||
}
|
||||
|
||||
CnchServerLeader::~CnchServerLeader()
|
||||
{
|
||||
try
|
||||
{
|
||||
shutDown();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
|
||||
bool CnchServerLeader::isLeader() const
|
||||
{
|
||||
return elector->isLeader();
|
||||
}
|
||||
|
||||
std::optional<HostWithPorts> CnchServerLeader::getCurrentLeader() const
|
||||
{
|
||||
return elector->getLeaderInfo();
|
||||
}
|
||||
|
||||
/// Callback by StorageElector. Need to guarantee that no exception is thrown in this method.
|
||||
bool CnchServerLeader::onLeader()
|
||||
{
|
||||
auto current_address = getContext()->getHostWithPorts().getRPCAddress();
|
||||
|
||||
try
|
||||
{
|
||||
async_query_status_check_task->activateAndSchedule();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_ERROR(log, "Failed to set leader status when current node becoming leader.");
|
||||
partialShutdown();
|
||||
return false;
|
||||
}
|
||||
|
||||
LOG_DEBUG(log, "Current node {} become leader", current_address);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CnchServerLeader::onFollower()
|
||||
{
|
||||
try
|
||||
{
|
||||
partialShutdown();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, __PRETTY_FUNCTION__);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CnchServerLeader::checkAsyncQueryStatus()
|
||||
{
|
||||
/// Mark inactive jobs as failed.
|
||||
try
|
||||
{
|
||||
auto statuses = getContext()->getCnchCatalog()->getIntermidiateAsyncQueryStatuses();
|
||||
std::vector<Protos::AsyncQueryStatus> to_expire;
|
||||
for (const auto & status : statuses)
|
||||
{
|
||||
/// Find the expired statuses.
|
||||
UInt64 start_time = static_cast<UInt64>(status.start_time());
|
||||
UInt64 execution_time = static_cast<UInt64>(status.max_execution_time());
|
||||
/// TODO(WangTao): We could have more accurate ways to expire statuses whose execution time is unlimited, e.g. checking the real status from the host server.
|
||||
if (execution_time == 0)
|
||||
execution_time = getContext()->getRootConfig().async_query_expire_time;
|
||||
if (time(nullptr) - start_time > execution_time)
|
||||
{
|
||||
to_expire.push_back(std::move(status));
|
||||
}
|
||||
}
|
||||
|
||||
if (!to_expire.empty())
|
||||
{
|
||||
LOG_INFO(log, "Mark {} async queries to failed.", to_expire.size());
|
||||
getContext()->getCnchCatalog()->markBatchAsyncQueryStatusFailed(to_expire, "Status expired");
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void CnchServerLeader::shutDown()
|
||||
{
|
||||
try
|
||||
{
|
||||
async_query_status_check_task->deactivate();
|
||||
elector->stop();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log);
|
||||
}
|
||||
}
|
||||
|
||||
/// Called when the election lease has expired.
|
||||
void CnchServerLeader::partialShutdown()
|
||||
{
|
||||
try
|
||||
{
|
||||
leader_initialized = false;
|
||||
|
||||
async_query_status_check_task->deactivate();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
#pragma once
|
||||
|
||||
#include <Catalog/IMetastore.h>
|
||||
#include <Core/BackgroundSchedulePool.h>
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <MergeTreeCommon/CnchServerTopology.h>
|
||||
#include <Common/StorageElection/KvStorage.h>
|
||||
#include <Common/StorageElection/StorageElector.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/***
|
||||
* CnchServerLeader is responsible for managing cluster-unique (among all servers) services.
*
* Leader election is required to make sure that only one thread in the cluster can execute these functions at a time.
|
||||
*/
|
||||
class CnchServerLeader : public WithContext
|
||||
{
|
||||
public:
|
||||
explicit CnchServerLeader(ContextPtr context_);
|
||||
|
||||
~CnchServerLeader();
|
||||
|
||||
bool isLeader() const;
|
||||
std::optional<HostWithPorts> getCurrentLeader() const;
|
||||
|
||||
void shutDown();
|
||||
void partialShutdown();
|
||||
|
||||
private:
|
||||
/// Logger
|
||||
LoggerPtr log = getLogger("CnchServerLeader");
|
||||
/// Leader election related.
|
||||
bool onLeader();
|
||||
bool onFollower();
|
||||
std::shared_ptr<StorageElector> elector;
|
||||
std::atomic_bool leader_initialized{false};
|
||||
|
||||
/// User-defined logic.
|
||||
bool checkAsyncQueryStatus();
|
||||
BackgroundSchedulePool::TaskHolder async_query_status_check_task;
|
||||
std::atomic<UInt64> async_query_status_check_time{0};
|
||||
};
|
||||
|
||||
using CnchServerLeaderPtr = std::shared_ptr<CnchServerLeader>;
|
||||
|
||||
}
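A minimal sketch of how server-side code might consume this interface, assuming a server ContextPtr named context; the gating pattern and the function name are illustrative, not taken from this diff:

#include <Interpreters/Context.h>
#include <MergeTreeCommon/CnchServerLeader.h>

void runClusterUniqueJobIfLeader(DB::ContextPtr context)
{
    // getCnchServerLeader() is declared on Context in this change; it is expected to
    // fail if the leader component was never set up via setCnchServerLeader().
    auto leader = context->getCnchServerLeader();
    if (leader->isLeader())
    {
        /// Run a cluster-unique job here, e.g. expiring stale async query statuses.
    }
    else if (auto leader_host = leader->getCurrentLeader())
    {
        /// Optionally forward the work to the current leader at *leader_host.
    }
}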
|
|
@ -49,7 +49,7 @@ String CnchServerVwTopology::format() const
|
|||
{
|
||||
if (i>0)
|
||||
ss << ", ";
|
||||
ss << servers[i].getHost();
|
||||
ss << servers[i].toDebugString();
|
||||
}
|
||||
ss << "]}";
|
||||
|
||||
|
@ -137,6 +137,10 @@ String CnchServerTopology::format() const
|
|||
ss << "{term: " << term;
|
||||
ss << ", initial: " << lease_initialtime;
|
||||
ss << ", expiration: " << lease_expiration;
|
||||
if (!leader_info.empty())
|
||||
ss << ", leader: " << leader_info;
|
||||
if (!reason.empty())
|
||||
ss << ", reason: " << reason;
|
||||
ss << ", [";
|
||||
for (auto it = vw_topologies.begin(); it != vw_topologies.end(); ++it)
|
||||
{
|
||||
|
@ -164,6 +168,29 @@ String dumpTopologies(const std::list<CnchServerTopology> & topologies)
|
|||
return ss.str();
|
||||
}
|
||||
|
||||
String dumpTopologies(const std::pair<std::map<String, CnchServerVwTopology>, std::map<String, CnchServerVwTopology>> & topology_diff)
{
|
||||
auto left_topo = topology_diff.first;
|
||||
auto right_topo = topology_diff.second;
|
||||
std::stringstream ss;
|
||||
ss << "{";
|
||||
for (auto it = left_topo.begin(); it != left_topo.end(); ++it)
|
||||
{
|
||||
if (it != left_topo.begin())
|
||||
ss << ", ";
|
||||
ss << it->second.format();
|
||||
}
|
||||
ss << "} -> {";
|
||||
for (auto it = right_topo.begin(); it != right_topo.end(); ++it)
|
||||
{
|
||||
if (it != right_topo.begin())
|
||||
ss << ", ";
|
||||
ss << it->second.format();
|
||||
}
|
||||
|
||||
ss << "}";
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
bool CnchServerTopology::isSameTopologyWith(const CnchServerTopology & other_topology) const
|
||||
{
|
||||
if (vw_topologies.size() != other_topology.vw_topologies.size())
|
||||
|
@ -182,4 +209,37 @@ bool CnchServerTopology::isSameTopologyWith(const CnchServerTopology & other_top
|
|||
return true;
|
||||
}
|
||||
|
||||
bool CnchServerVwTopology::operator<(const CnchServerVwTopology & other) const
|
||||
{
|
||||
if (server_vw_name != other.server_vw_name)
|
||||
{
|
||||
return server_vw_name < other.server_vw_name;
|
||||
}
|
||||
/// Use "server-specific" `HostWithPorts` comparision logic.
|
||||
return std::lexicographical_compare(
|
||||
servers.begin(), servers.end(), other.servers.begin(), other.servers.end(), [](const auto & a, const auto & b) {
|
||||
return a.lessThan(b);
|
||||
});
|
||||
}
|
||||
std::pair<std::map<String, CnchServerVwTopology>, std::map<String, CnchServerVwTopology>>
|
||||
CnchServerTopology::diffWith(const CnchServerTopology & other_topology) const
|
||||
{
|
||||
std::map<String, CnchServerVwTopology> not_in_right;
|
||||
std::set_difference(
|
||||
vw_topologies.begin(),
|
||||
vw_topologies.end(),
|
||||
other_topology.vw_topologies.begin(),
|
||||
other_topology.vw_topologies.end(),
|
||||
std::inserter(not_in_right, not_in_right.begin()));
|
||||
|
||||
std::map<String, CnchServerVwTopology> not_in_left;
|
||||
std::set_difference(
|
||||
other_topology.vw_topologies.begin(),
|
||||
other_topology.vw_topologies.end(),
|
||||
vw_topologies.begin(),
|
||||
vw_topologies.end(),
|
||||
std::inserter(not_in_left, not_in_left.begin()));
|
||||
|
||||
return {not_in_right, not_in_left};
|
||||
}
|
||||
}
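Why CnchServerVwTopology needs operator< at all: diffWith() runs std::set_difference straight over std::map iterators, whose value_type is std::pair<const String, CnchServerVwTopology>, and std::pair's default operator< also compares the mapped values, so the element type has to be less-than comparable. A compile-time illustration of that requirement:

#include <map>
#include <type_traits>
#include <utility>
#include <MergeTreeCommon/CnchServerTopology.h>

// std::set_difference with the default comparator applies operator< to the dereferenced
// map elements, i.e. to std::pair<const String, CnchServerVwTopology>, which in turn
// requires CnchServerVwTopology::operator< defined above.
static_assert(std::is_same_v<
    std::map<DB::String, DB::CnchServerVwTopology>::value_type,
    std::pair<const DB::String, DB::CnchServerVwTopology>>);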
|
||||
|
|
|
@ -26,11 +26,12 @@ namespace DB
|
|||
class CnchServerVwTopology
|
||||
{
|
||||
public:
|
||||
CnchServerVwTopology(const String & vw_name_);
|
||||
explicit CnchServerVwTopology(const String & vw_name_);
|
||||
|
||||
bool operator<(const CnchServerVwTopology & other) const;
|
||||
void addServer(const HostWithPorts & server);
|
||||
const HostWithPortsVec & getServerList() const;
|
||||
|
||||
|
||||
String getServerVwName() const;
|
||||
|
||||
String format() const;
|
||||
|
@ -46,7 +47,8 @@ class CnchServerTopology
|
|||
{
|
||||
|
||||
public:
|
||||
CnchServerTopology() {}
|
||||
CnchServerTopology() = default;
|
||||
explicit CnchServerTopology(const String & leader_info_) : leader_info(leader_info_) { }
|
||||
|
||||
void addServer(const HostWithPorts & server, const String & server_vw_name = DEFAULT_SERVER_VW_NAME);
|
||||
|
||||
|
@ -75,16 +77,33 @@ public:
|
|||
|
||||
bool isSameTopologyWith(const CnchServerTopology & other_topology) const;
|
||||
|
||||
std::pair<std::map<String, CnchServerVwTopology>, std::map<String, CnchServerVwTopology>>
|
||||
diffWith(const CnchServerTopology & other_topology) const;
|
||||
|
||||
const String & getLeaderInfo() const { return leader_info; }
|
||||
void setLeaderInfo(const String & leader_info_) { leader_info = leader_info_; }
|
||||
const String & getReason() const { return reason; }
|
||||
void setReason(const String & reason_) { reason = reason_; }
|
||||
|
||||
private:
|
||||
UInt64 lease_initialtime = 0;
|
||||
UInt64 lease_expiration = 0;
|
||||
UInt64 term = 0;
|
||||
String leader_info;
|
||||
String reason;
|
||||
HostWithPortsVec servers;
|
||||
std::map<String, CnchServerVwTopology> vw_topologies;
|
||||
};
|
||||
|
||||
|
||||
String dumpTopologies(const std::list<CnchServerTopology>& topologies);
|
||||
|
||||
/**
|
||||
* @brief Dump the difference between two topologies (a helper function for debugging).
|
||||
*
|
||||
* @param topology_diff Diffs of two topologies. Each part of the pair
|
||||
* is a map of vw_name to vw_topology. You can get the diff
|
||||
* by calling the `diffWith` method of `CnchServerTopology`.
|
||||
* @return A string representation of the difference.
|
||||
*/
|
||||
String dumpTopologies(const std::pair<std::map<String, CnchServerVwTopology>, std::map<String, CnchServerVwTopology>> & topology_diff);
|
||||
}
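A short sketch of how the new diff helpers could be combined, assuming two CnchServerTopology values observed at different times; the wrapper function is illustrative and not part of this diff:

#include <MergeTreeCommon/CnchServerTopology.h>

DB::String describeTopologyChange(const DB::CnchServerTopology & previous, const DB::CnchServerTopology & current)
{
    if (previous.isSameTopologyWith(current))
        return "unchanged";

    // diffWith() returns {vw topologies only in `previous`, vw topologies only in `current`};
    // dumpTopologies() renders that pair as "{...} -> {...}".
    return DB::dumpTopologies(previous.diffWith(current));
}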
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue