diff --git a/base/common/logger_useful.h b/base/common/logger_useful.h index a364c8196b..6dea0fc174 100644 --- a/base/common/logger_useful.h +++ b/base/common/logger_useful.h @@ -27,6 +27,7 @@ #include #include #include +#include namespace @@ -44,20 +45,21 @@ namespace #define LOG_IMPL(logger, priority, PRIORITY, ...) do \ { \ - const bool _is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \ + const bool _is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \ (DB::CurrentThread::getGroup()->client_logs_level >= (priority)); \ - if ((logger)->is((PRIORITY)) || _is_clients_log) \ + const auto & _logger = (logger); \ + if ((_logger)->is((PRIORITY)) || _is_clients_log) \ { \ std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \ - if (auto _channel = (logger)->getChannel()) \ + if (auto _channel = (_logger)->getChannel()) \ { \ std::string file_function; \ file_function += __FILE__; \ file_function += "; "; \ file_function += __PRETTY_FUNCTION__; \ - Poco::Message poco_message((logger)->name(), formatted_message, \ + Poco::Message poco_message((_logger)->name(), formatted_message, \ (PRIORITY), file_function.c_str(), __LINE__); \ - _channel->log(poco_message); \ + _channel->log(poco_message); \ } \ } \ } while (false) diff --git a/base/metrics2/metric_helper.cpp b/base/metrics2/metric_helper.cpp index 51d336d36c..90a276fb64 100644 --- a/base/metrics2/metric_helper.cpp +++ b/base/metrics2/metric_helper.cpp @@ -87,9 +87,9 @@ static void InitMetricsHelper(const metrics2::MetricCollectorConf& config, const metrics2::Metrics::init(config); isInitialized = true; - LOG_INFO(&Poco::Logger::get("metric_helper::InitMetricsHelper"), "{}, tag = {}", config.toString(), tags); + LOG_INFO(getLogger("metric_helper::InitMetricsHelper"), "{}, tag = {}", config.toString(), tags); - // LOG_INFO(&Poco::Logger::get("metric_helper::InitMetricsHelper"), config.toString() << ", tag = " << tags); + // 
LOG_INFO(getLogger("metric_helper::InitMetricsHelper"), config.toString() << ", tag = " << tags); } void InitMetrics(const metrics2::MetricCollectorConf& config, const std::string & custom_tags) { diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h index 387b089724..1fde12b434 100644 --- a/programs/copier/ClusterCopier.h +++ b/programs/copier/ClusterCopier.h @@ -1,5 +1,6 @@ #pragma once +#include #include "Aliases.h" #include "Internals.h" #include "TaskCluster.h" @@ -19,7 +20,7 @@ public: const String & host_id_, const String & proxy_database_name_, ContextMutablePtr context_, - Poco::Logger * log_) + LoggerPtr log_) : WithMutableContext(context_), task_zookeeper_path(task_path_), host_id(host_id_), @@ -216,7 +217,7 @@ private: bool experimental_use_sample_offset{false}; - Poco::Logger * log; + LoggerPtr log; std::chrono::milliseconds default_sleep_time{1000}; }; diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index 3a3b0bf9cb..bfa9228a3f 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -130,7 +130,7 @@ void ClusterCopierApp::mainImpl() StatusFile status_file(process_path + "/status", StatusFile::write_full_info); ThreadStatus thread_status; - auto * log = &logger(); + auto log = getLogger(logger()); LOG_INFO(log, "Starting clickhouse-copier (id {}, host_id {}, path {}, revision {})", process_id, host_id, process_path, ClickHouseRevision::getVersionRevision()); SharedContextHolder shared_context = Context::createShared(); diff --git a/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h index 66036ae2f2..bc284059cc 100644 --- a/programs/copier/ZooKeeperStaff.h +++ b/programs/copier/ZooKeeperStaff.h @@ -1,5 +1,7 @@ #pragma once +#include + /** Allows to compare two incremental counters of type UInt32 in presence of possible overflow. * We assume that we compare values that are not too far away. 
* For example, when we increment 0xFFFFFFFF, we get 0. So, 0xFFFFFFFF is less than 0. @@ -177,7 +179,7 @@ public: auto watch_callback = [stale = stale] (const Coordination::WatchResponse & rsp) { - auto logger = &Poco::Logger::get("ClusterCopier"); + auto logger = getLogger("ClusterCopier"); if (rsp.error == Coordination::Error::ZOK) { switch (rsp.type) diff --git a/programs/dumper/DumpHelper.h b/programs/dumper/DumpHelper.h index 2650ed161a..b23d9eca55 100644 --- a/programs/dumper/DumpHelper.h +++ b/programs/dumper/DumpHelper.h @@ -1,4 +1,5 @@ // #include +#include #include #include #include @@ -101,7 +102,7 @@ public: void removeDumpVersionFromZk(const Context & context); - void setLog(Poco::Logger * log_) { log = log_; } + void setLog(LoggerPtr log_) { log = log_; } private: void writeTempUniqueKeyIndex(Block & block, size_t first_rid, rocksdb::DB & temp_index, StorageCloudMergeTree & cloud); @@ -111,7 +112,7 @@ private: String unique_version_column; String dump_lsn_path; ManifestStore manifest_store; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/programs/dumper/Dumper.cpp b/programs/dumper/Dumper.cpp index aa2f2d8a57..7711c29593 100644 --- a/programs/dumper/Dumper.cpp +++ b/programs/dumper/Dumper.cpp @@ -102,7 +102,7 @@ public: const String & partition, const std::vector& partitionlist, const std::vector& skippartitionlist); - + void initDataDiskPath(const String & escaped_database, const String & escaped_table, std::vector & data_paths, @@ -113,7 +113,7 @@ public: Snapshot & snapshot, const std::shared_ptr & local_disk, const std::shared_ptr & remote_disk); - + void getUniqueTableActivePartsFromDisk(StorageCloudMergeTree & cloud, Snapshot & snapshot, const String & escaped_database, @@ -135,7 +135,7 @@ private: ContextMutablePtr global_context; Settings settings; Int64 current_shard_number {0}; - Poco::Logger * log{}; + LoggerPtr log{}; UniqueTableDumpHelper unique_table_dump_helper; }; @@ -222,7 +222,7 @@ void 
ClickHouseDumper::defineOptions(Poco::Util::OptionSet & options) options.addOption(Poco::Util::Option("skip_unkowning_settings", "", "skip dumper unknown settings") // .required(false) .binding("skip_unkowning_settings")); - + options.addOption(Poco::Util::Option("multi_disk_path_list", "", "multi disk path list") // .required(false) .argument("") @@ -478,7 +478,7 @@ void ClickHouseDumper::initDataDiskPath( for (const auto & disk_name : vec_names) { String disk_path = disk_name + "data/" + escaped_database + "/" + escaped_table + "/"; - + data_paths.push_back(std::move(disk_path)); } } @@ -591,7 +591,7 @@ void ClickHouseDumper::processTable(const String & database, const String & tabl throw Exception("Table " + db_table + " is atomic database : " + attach_query_str, ErrorCodes::UNKNOWN_TABLE); initDataDiskPath(escaped_database, escaped_table, data_paths, is_multi_disk); - + /// Get unique table snapshot if (cloud.getInMemoryMetadataPtr()->hasUniqueKey()) { @@ -812,8 +812,8 @@ int ClickHouseDumper::main(const std::vector &) config().add(config_processor.loadConfig().configuration.duplicate(), PRIO_APPLICATION, true, false); } - log = &logger(); - log->setLevel(config().getString("logger.level", "debug")); + logger().setLevel(config().getString("logger.level", "debug")); + log = getLogger(logger()); unique_table_dump_helper.setLog(log); shared_context = DB::Context::createShared(); diff --git a/programs/keeper-converter/KeeperConverter.cpp b/programs/keeper-converter/KeeperConverter.cpp index 78f15df0b0..7a4060c932 100644 --- a/programs/keeper-converter/KeeperConverter.cpp +++ b/programs/keeper-converter/KeeperConverter.cpp @@ -48,8 +48,9 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv) po::store(po::command_line_parser(argc, argv).options(desc).run(), options); Poco::AutoPtr console_channel(new Poco::ConsoleChannel); - Poco::Logger * logger = &Poco::Logger::get("KeeperConverter"); - logger->setChannel(console_channel); + LoggerRawPtr raw_logger = 
getRawLogger("KeeperConverter"); + raw_logger->setChannel(console_channel); + LoggerPtr logger = getLogger(*raw_logger); if (options.count("help")) { diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index f50afe4609..fb81cc20c3 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -122,7 +122,7 @@ int waitServersToFinish(std::vector & servers, size_t return current_connections; } -Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) +Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, LoggerPtr log) { Poco::Net::SocketAddress socket_address; try @@ -186,7 +186,7 @@ std::string getUserName(uid_t user_id) Poco::Net::SocketAddress Keeper::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const { - auto address = makeSocketAddress(host, port, &logger()); + auto address = makeSocketAddress(host, port, getLogger(logger())); #if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100 if (secure) /// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl @@ -313,7 +313,7 @@ void Keeper::defineOptions(Poco::Util::OptionSet & options) int Keeper::main(const std::vector & /*args*/) { - Poco::Logger * log = &logger(); + LoggerPtr log = getLogger(logger()); UseSSL use_ssl; diff --git a/programs/keeper/Keeper.h b/programs/keeper/Keeper.h index d724cbc421..c4f9f4e04c 100644 --- a/programs/keeper/Keeper.h +++ b/programs/keeper/Keeper.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include diff --git a/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp b/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp index 07b3f2f334..bea62bd43b 100644 --- a/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp +++ b/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp @@ -13,7 +13,7 @@ 
CatBoostLibraryHandlerFactory & CatBoostLibraryHandlerFactory::instance() } CatBoostLibraryHandlerFactory::CatBoostLibraryHandlerFactory() - : log(&Poco::Logger::get("CatBoostLibraryHandlerFactory")) + : log(getLogger("CatBoostLibraryHandlerFactory")) { } diff --git a/programs/library-bridge/CatBoostLibraryHandlerFactory.h b/programs/library-bridge/CatBoostLibraryHandlerFactory.h index 35fb353e68..0b561751e6 100644 --- a/programs/library-bridge/CatBoostLibraryHandlerFactory.h +++ b/programs/library-bridge/CatBoostLibraryHandlerFactory.h @@ -1,5 +1,6 @@ #pragma once +#include #include "CatBoostLibraryHandler.h" #include @@ -31,7 +32,7 @@ private: /// map: model path --> catboost library handler std::unordered_map library_handlers TSA_GUARDED_BY(mutex); std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp b/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp index d02688d216..234b7a8833 100644 --- a/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp +++ b/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp @@ -26,7 +26,7 @@ void ExternalDictionaryLibraryHandlerFactory::create( if (library_handlers.contains(dictionary_id)) { - LOG_WARNING(&Poco::Logger::get("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id); + LOG_WARNING(getLogger("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id); return; } diff --git a/programs/library-bridge/LibraryBridgeHandlerFactory.cpp b/programs/library-bridge/LibraryBridgeHandlerFactory.cpp index 4af1f8355e..e5ab22f2d4 100644 --- a/programs/library-bridge/LibraryBridgeHandlerFactory.cpp +++ b/programs/library-bridge/LibraryBridgeHandlerFactory.cpp @@ -12,7 +12,7 @@ LibraryBridgeHandlerFactory::LibraryBridgeHandlerFactory( size_t keep_alive_timeout_, ContextPtr context_) : 
WithContext(context_) - , log(&Poco::Logger::get(name_)) + , log(getLogger(name_)) , name(name_) , keep_alive_timeout(keep_alive_timeout_) { diff --git a/programs/library-bridge/LibraryBridgeHandlerFactory.h b/programs/library-bridge/LibraryBridgeHandlerFactory.h index 12efc2ae6a..085f24db38 100644 --- a/programs/library-bridge/LibraryBridgeHandlerFactory.h +++ b/programs/library-bridge/LibraryBridgeHandlerFactory.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -19,7 +20,7 @@ public: std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; private: - Poco::Logger * log; + LoggerPtr log; const std::string name; const size_t keep_alive_timeout; }; diff --git a/programs/library-bridge/LibraryBridgeHandlers.cpp b/programs/library-bridge/LibraryBridgeHandlers.cpp index 09ca91c740..b4098f01a7 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.cpp +++ b/programs/library-bridge/LibraryBridgeHandlers.cpp @@ -46,7 +46,7 @@ namespace if (!response.sent()) *response.send() << message << std::endl; - LOG_WARNING(&Poco::Logger::get("LibraryBridge"), (message)); + LOG_WARNING(getLogger("LibraryBridge"), (message)); } std::shared_ptr parseColumns(String && column_string) @@ -93,7 +93,7 @@ static void writeData(Block data, OutputFormatPtr format) ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeRequestHandler")) + , log(getLogger("ExternalDictionaryLibraryBridgeRequestHandler")) { } @@ -385,7 +385,7 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , 
log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeExistsHandler")) + , log(getLogger("ExternalDictionaryLibraryBridgeExistsHandler")) { } @@ -424,7 +424,7 @@ CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler( size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , log(&Poco::Logger::get("CatBoostLibraryBridgeRequestHandler")) + , log(getLogger("CatBoostLibraryBridgeRequestHandler")) { } @@ -622,7 +622,7 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , log(&Poco::Logger::get("CatBoostLibraryBridgeExistsHandler")) + , log(getLogger("CatBoostLibraryBridgeExistsHandler")) { } diff --git a/programs/library-bridge/LibraryBridgeHandlers.h b/programs/library-bridge/LibraryBridgeHandlers.h index aafe58ffcf..8bc4be6f84 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.h +++ b/programs/library-bridge/LibraryBridgeHandlers.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -26,7 +27,7 @@ private: static constexpr inline auto FORMAT = "RowBinary"; const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; @@ -40,7 +41,7 @@ public: private: const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; @@ -69,7 +70,7 @@ public: private: const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; @@ -83,7 +84,7 @@ public: private: const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index f3ebd09ddf..a5bcf2331a 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -153,7 +153,7 @@ void LocalServer::tryInitPath() { // The path is not provided explicitly - use a unique 
path in the system temporary directory // (or in the current dir if temporary don't exist) - Poco::Logger * log = &logger(); + LoggerPtr log = getLogger("LocalServer"); std::filesystem::path parent_folder; std::filesystem::path default_path; @@ -217,7 +217,7 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str int LocalServer::main(const std::vector & /*args*/) try { - Poco::Logger * log = &logger(); + LoggerPtr log = getLogger("LocalServer"); ThreadStatus thread_status; UseSSL use_ssl; diff --git a/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h index bc976f54ae..e96de54ad8 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.h +++ b/programs/odbc-bridge/ColumnInfoHandler.h @@ -2,6 +2,7 @@ #if USE_ODBC +#include #include #include #include @@ -17,7 +18,7 @@ class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext public: ODBCColumnsInfoHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get("ODBCColumnsInfoHandler")) + , log(getLogger("ODBCColumnsInfoHandler")) , keep_alive_timeout(keep_alive_timeout_) { } @@ -25,7 +26,7 @@ public: void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; private: - Poco::Logger * log; + LoggerPtr log; size_t keep_alive_timeout; }; diff --git a/programs/odbc-bridge/HandlerFactory.h b/programs/odbc-bridge/HandlerFactory.h index ffbbe3670a..616fd6c23c 100644 --- a/programs/odbc-bridge/HandlerFactory.h +++ b/programs/odbc-bridge/HandlerFactory.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include "ColumnInfoHandler.h" @@ -19,7 +20,7 @@ class ODBCBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContext public: ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get(name_)) + , log(getLogger(name_)) , name(name_) , keep_alive_timeout(keep_alive_timeout_) { 
@@ -28,7 +29,7 @@ public: std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; private: - Poco::Logger * log; + LoggerPtr log; std::string name; size_t keep_alive_timeout; }; diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h index ef3806fd80..5f69f6af92 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.h +++ b/programs/odbc-bridge/IdentifierQuoteHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -16,7 +17,7 @@ class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext public: IdentifierQuoteHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get("IdentifierQuoteHandler")) + , log(getLogger("IdentifierQuoteHandler")) , keep_alive_timeout(keep_alive_timeout_) { } @@ -24,7 +25,7 @@ public: void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; private: - Poco::Logger * log; + LoggerPtr log; size_t keep_alive_timeout; }; diff --git a/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h index bc0fca8b9a..3f3374bf05 100644 --- a/programs/odbc-bridge/MainHandler.h +++ b/programs/odbc-bridge/MainHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -24,7 +25,7 @@ public: ContextPtr context_, const String & mode_) : WithContext(context_) - , log(&Poco::Logger::get("ODBCHandler")) + , log(getLogger("ODBCHandler")) , keep_alive_timeout(keep_alive_timeout_) , mode(mode_) { @@ -33,7 +34,7 @@ public: void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; private: - Poco::Logger * log; + LoggerPtr log; size_t keep_alive_timeout; String mode; diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index 1ad2dc48df..8efcc96c30 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -21,7 +21,7 
@@ namespace ErrorCodes ODBCBlockInputStream::ODBCBlockInputStream( nanodbc::ConnectionHolderPtr connection_holder, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) - : log(&Poco::Logger::get("ODBCBlockInputStream")) + : log(getLogger("ODBCBlockInputStream")) , max_block_size{max_block_size_} , query(query_str) { diff --git a/programs/odbc-bridge/ODBCBlockInputStream.h b/programs/odbc-bridge/ODBCBlockInputStream.h index 26aa766dbc..336e255fee 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.h +++ b/programs/odbc-bridge/ODBCBlockInputStream.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -32,7 +33,7 @@ private: column.insertFrom(sample_column, 0); } - Poco::Logger * log; + LoggerPtr log; const UInt64 max_block_size; ExternalResultDescription description; diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp index 9a6cd72d3f..731e670203 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp @@ -46,7 +46,7 @@ ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::ConnectionHolderPtr connec const Block & sample_block_, ContextPtr local_context_, IdentifierQuotingStyle quoting_) - : log(&Poco::Logger::get("ODBCBlockOutputStream")) + : log(getLogger("ODBCBlockOutputStream")) , connection_holder(std::move(connection_holder_)) , db_name(remote_database_name_) , table_name(remote_table_name_) diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.h b/programs/odbc-bridge/ODBCBlockOutputStream.h index 1b42119e49..d73a80baa2 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.h +++ b/programs/odbc-bridge/ODBCBlockOutputStream.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -27,7 +28,7 @@ public: void write(const Block & block) override; private: - Poco::Logger * log; + LoggerPtr log; nanodbc::ConnectionHolderPtr connection_holder; std::string db_name; diff --git 
a/programs/odbc-bridge/SchemaAllowedHandler.h b/programs/odbc-bridge/SchemaAllowedHandler.h index d7b922ed05..9197b03b1f 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.h +++ b/programs/odbc-bridge/SchemaAllowedHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -18,7 +19,7 @@ class SchemaAllowedHandler : public HTTPRequestHandler, WithContext public: SchemaAllowedHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get("SchemaAllowedHandler")) + , log(getLogger("SchemaAllowedHandler")) , keep_alive_timeout(keep_alive_timeout_) { } @@ -26,7 +27,7 @@ public: void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; private: - Poco::Logger * log; + LoggerPtr log; size_t keep_alive_timeout; }; diff --git a/programs/odbc-bridge/getIdentifierQuote.cpp b/programs/odbc-bridge/getIdentifierQuote.cpp index eebb14eb24..766c096e23 100644 --- a/programs/odbc-bridge/getIdentifierQuote.cpp +++ b/programs/odbc-bridge/getIdentifierQuote.cpp @@ -26,7 +26,7 @@ std::string getIdentifierQuote(nanodbc::ConnectionHolderPtr connection_holder) } catch (...) { - LOG_WARNING(&Poco::Logger::get("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. Reason: {}", getCurrentExceptionMessage(false)); + LOG_WARNING(getLogger("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. 
Reason: {}", getCurrentExceptionMessage(false)); return "\""; } diff --git a/programs/part-toolkit/PartMergerApp.cpp b/programs/part-toolkit/PartMergerApp.cpp index 2303c9e9a5..ef158f07c3 100644 --- a/programs/part-toolkit/PartMergerApp.cpp +++ b/programs/part-toolkit/PartMergerApp.cpp @@ -45,7 +45,7 @@ const std::string PartMergerApp::default_config = "\n" "merger\n" ""; -void PartMergerApp::initHDFS(DB::ContextMutablePtr context, Poco::Logger * log) +void PartMergerApp::initHDFS(DB::ContextMutablePtr context, LoggerPtr log) { LOG_DEBUG(log, "Initialize HDFS driver."); using HDFSConnectionParams = DB::HDFSConnectionParams; @@ -109,7 +109,7 @@ int PartMergerApp::main([[maybe_unused]] const std::vector & args) { Poco::Logger::root().setLevel("information"); } - auto * log = &Poco::Logger::get("PartMergerApp"); + auto log = getLogger("PartMergerApp"); LOG_DEBUG(log, "Parse arguments"); // Parse arguments. diff --git a/programs/part-toolkit/PartMergerApp.h b/programs/part-toolkit/PartMergerApp.h index 744f4242c0..2aeabec1bd 100644 --- a/programs/part-toolkit/PartMergerApp.h +++ b/programs/part-toolkit/PartMergerApp.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -54,12 +55,12 @@ private: /** * Print help message for part_merger_tool. */ - inline static void mergerHelp(Poco::Logger * log) { LOG_ERROR(log, PartMergerApp::help_message); } + inline static void mergerHelp(LoggerPtr log) { LOG_ERROR(log, PartMergerApp::help_message); } /** * Init HDFS default configuration. 
*/ - void initHDFS(DB::ContextMutablePtr context, Poco::Logger * log); + void initHDFS(DB::ContextMutablePtr context, LoggerPtr log); int main([[maybe_unused]] const std::vector & args) override; diff --git a/programs/part-toolkit/PartToolkit.cpp b/programs/part-toolkit/PartToolkit.cpp index bf304ec1e7..e62f8eabfc 100644 --- a/programs/part-toolkit/PartToolkit.cpp +++ b/programs/part-toolkit/PartToolkit.cpp @@ -52,7 +52,7 @@ int mainHelp(int, char **) return 0; } -void run(const std::string & query, Poco::Logger * log) +void run(const std::string & query, LoggerPtr log) { LOG_DEBUG(log, "Executing query : {}", query); DB::ThreadStatus status; @@ -133,7 +133,7 @@ int mainEntryClickhousePartToolkit(int argc, char ** argv) { } - Poco::Logger * log = &Poco::Logger::get("part-toolkit"); + LoggerPtr log = getLogger("part-toolkit"); LOG_INFO(log, "Logger level: {}", log_level); diff --git a/programs/server/BrpcServerHolder.h b/programs/server/BrpcServerHolder.h index d253423cc1..c4ae53d8a0 100644 --- a/programs/server/BrpcServerHolder.h +++ b/programs/server/BrpcServerHolder.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -45,7 +46,7 @@ public: else if (global_context->getServerType() == ServerType::cnch_worker) { addService(*rpc_server, rpc_services, CnchWorkerServiceImpl_RegisterService(global_context).service); - LOG_DEBUG(&Poco::Logger::get("BrpcServerHolder"), "Start register RemoteDiskCacheService: {}", host_port); + LOG_DEBUG(getLogger("BrpcServerHolder"), "Start register RemoteDiskCacheService: {}", host_port); addService(*rpc_server, rpc_services, RemoteDiskCacheService_RegisterService(global_context).service); } @@ -61,7 +62,7 @@ public: { start_success = false; if (listen_try) - LOG_ERROR(&Poco::Logger::get("BrpcServerHolder"), "Failed tp start rpc server on {}", host_port); + LOG_ERROR(getLogger("BrpcServerHolder"), "Failed tp start rpc server on {}", host_port); else throw Exception("Failed tp start rpc server on " + host_port, 
ErrorCodes::BRPC_EXCEPTION); } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 4d899a2dd4..043b0c1be9 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -325,7 +325,7 @@ static std::string getUserName(uid_t user_id) return toString(user_id); } -Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) +Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, LoggerPtr log) { Poco::Net::SocketAddress socket_address; try @@ -355,7 +355,7 @@ Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const { - auto address = makeSocketAddress(host, port, &logger()); + auto address = makeSocketAddress(host, port, getLogger(logger())); #if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100 if (secure) /// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl @@ -403,7 +403,7 @@ static void clearOldStoreDirectory(const DisksMap& disk_map) try { - LOG_DEBUG(&Poco::Logger::get(__func__), "Removing {} from disk {}", + LOG_DEBUG(getLogger(__func__), "Removing {} from disk {}", String(fs::path(disk->getPath()) / iter->path()), disk->getName()); disk->removeRecursive(iter->path()); } @@ -508,7 +508,7 @@ void checkForUsersNotInMainConfig( const Poco::Util::AbstractConfiguration & config, const std::string & config_path, const std::string & users_config_path, - Poco::Logger * log) + LoggerPtr log) { if (config.getBool("skip_check_for_incorrect_settings", false)) return; @@ -539,7 +539,7 @@ void checkForUsersNotInMainConfig( void huallocLogPrint(std::string s) { - static Poco::Logger * logger = &Poco::Logger::get("HuallocDebug"); + static LoggerPtr logger = getLogger("HuallocDebug"); LOG_INFO(logger, 
s); } @@ -571,7 +571,7 @@ void limitMemoryCacheDefaultMaxRatio(RootConfiguration & root_config, const UInt Float32 max_total_ratio = root_config.cache_size_to_ram_max_ratio.value; Float32 lowered_ratio = (total_ratio > max_total_ratio ? max_total_ratio / total_ratio : 1.0f); - Poco::Logger * logger = &Poco::Logger::get("MemoryCacheDefaultRatioLimit"); + auto logger = getLogger("MemoryCacheDefaultRatioLimit"); LOG_INFO(logger, "Total memory {}, max ratio for memory cache is {}{}", formatReadableSizeWithBinarySuffix(memory_amount), max_total_ratio, @@ -601,7 +601,7 @@ void limitMemoryCacheDefaultMaxRatio(RootConfiguration & root_config, const UInt int Server::main(const std::vector & /*args*/) { - Poco::Logger * log = &logger(); + LoggerPtr log = getLogger(logger()); UseSSL use_ssl; diff --git a/programs/server/Server.h b/programs/server/Server.h index d0f30ec1ae..1569e4b32c 100644 --- a/programs/server/Server.h +++ b/programs/server/Server.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include diff --git a/programs/storage-tools/PartInspector.cpp b/programs/storage-tools/PartInspector.cpp index da3dd546a7..457a05c8f5 100644 --- a/programs/storage-tools/PartInspector.cpp +++ b/programs/storage-tools/PartInspector.cpp @@ -344,7 +344,7 @@ void ParallelInspectRunner::InspectTask::exec() { } ParallelInspectRunner::TaskAllocator::TaskAllocator(const String& base_path, FSOp& fs_op, InspectTask::Type type, - const String& stream_name, Poco::Logger* logger): + const String& stream_name, LoggerPtr logger): base_path_(base_path), fs_op_(fs_op), type_(type), logger_(logger), stream_name_(stream_name) { if (fs_op_.isFile(base_path_)) { abs_paths_.push_back(base_path_); @@ -382,7 +382,7 @@ void ParallelInspectRunner::TaskAllocator::collectPaths(const String& base_path, ParallelInspectRunner::ParallelInspectRunner(const String& base_path, FSOp& fs_op, size_t worker_threads, InspectTask::Type type, const String& stream_name): - task_allocator_(base_path, fs_op, type, 
stream_name, &Poco::Logger::get("ParallelInspectRunner")) { + task_allocator_(base_path, fs_op, type, stream_name, getLogger("ParallelInspectRunner")) { worker_pool_ = std::make_unique(worker_threads, worker_threads, worker_threads); for (size_t i = 0; i < worker_threads; ++i) { diff --git a/programs/storage-tools/PartInspector.h b/programs/storage-tools/PartInspector.h index dea661be45..848a907c22 100644 --- a/programs/storage-tools/PartInspector.h +++ b/programs/storage-tools/PartInspector.h @@ -1,3 +1,4 @@ +#include #include #include #include @@ -129,14 +130,14 @@ public: ALL = BRIEF | CHECKSUMS, }; - InspectTask(const String& path, FSOp& fs, Type type, const String& stream_name, Poco::Logger* logger): + InspectTask(const String& path, FSOp& fs, Type type, const String& stream_name, LoggerPtr logger): type_(type), logger_(logger), stream_name_(stream_name), inspector_(path, fs) {} void exec(); private: Type type_; - Poco::Logger* logger_; + LoggerPtr logger_; String stream_name_; PartInspector inspector_; }; @@ -144,7 +145,7 @@ public: class TaskAllocator { public: TaskAllocator(const String& base_path, FSOp& fs_op, InspectTask::Type type, - const String& stream_name, Poco::Logger* logger); + const String& stream_name, LoggerPtr logger); std::unique_ptr acquire(); @@ -157,7 +158,7 @@ public: InspectTask::Type type_; - Poco::Logger* logger_; + LoggerPtr logger_; String stream_name_; diff --git a/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp index 8a1cce5393..00f159ce43 100644 --- a/src/Access/AccessRights.cpp +++ b/src/Access/AccessRights.cpp @@ -483,7 +483,7 @@ public: optimizeTree(); } - void logTree(Poco::Logger * log, const String & title) const + void logTree(LoggerPtr log, const String & title) const { LOG_TRACE(log, "Tree({}): level={}, name={}, flags={}, min_flags={}, max_flags={}, num_children={}, is_sensitive={}", title, level, node_name ? 
*node_name : "NULL", flags.toString(), @@ -1250,7 +1250,7 @@ AccessRights AccessRights::getFullAccess() template void AccessRightsBase::logTree() const { - auto * log = &Poco::Logger::get("AccessRights"); + auto log = getLogger("AccessRights"); if (root) { root->logTree(log, ""); diff --git a/src/Access/AllowedClientHosts.cpp b/src/Access/AllowedClientHosts.cpp index c7682bd2fc..6865d6a2fd 100644 --- a/src/Access/AllowedClientHosts.cpp +++ b/src/Access/AllowedClientHosts.cpp @@ -247,7 +247,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const throw; /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next. LOG_WARNING( - &Poco::Logger::get("AddressPatterns"), + getLogger("AddressPatterns"), "Failed to check if the allowed client hosts contain address {}. {}, code = {}", client_address.toString(), e.displayText(), e.code()); return false; @@ -280,7 +280,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const throw; /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next. LOG_WARNING( - &Poco::Logger::get("AddressPatterns"), + getLogger("AddressPatterns"), "Failed to check if the allowed client hosts contain address {}. 
{}, code = {}", client_address.toString(), e.displayText(), e.code()); return false; diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 8be282fac6..33e87cef14 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -357,7 +357,7 @@ void ContextAccess::setUser(const UserPtr & user_) const } user_name = user->getName(); - trace_log = &Poco::Logger::get("ContextAccess (" + user_name + ")"); + trace_log = getLogger("ContextAccess (" + user_name + ")"); std::vector current_roles, current_roles_with_admin_option; diff --git a/src/Access/ContextAccess.h b/src/Access/ContextAccess.h index 91098e95ef..839c48f9c1 100644 --- a/src/Access/ContextAccess.h +++ b/src/Access/ContextAccess.h @@ -2,11 +2,12 @@ #include #include -#include +#include #include +#include +#include #include #include -#include #include @@ -223,7 +224,7 @@ private: const AccessControlManager * manager = nullptr; const Params params; bool is_full_access = false; - mutable Poco::Logger * trace_log = nullptr; + mutable LoggerPtr trace_log = nullptr; mutable UserPtr user; mutable String user_name; mutable scope_guard subscription_for_user_change; diff --git a/src/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp index 0d017d5568..1e4afcba15 100644 --- a/src/Access/DiskAccessStorage.cpp +++ b/src/Access/DiskAccessStorage.cpp @@ -199,7 +199,7 @@ namespace } - AccessEntityPtr tryReadEntityFile(const String & file_path, Poco::Logger & log) + AccessEntityPtr tryReadEntityFile(const String & file_path, LoggerPtr log) { try { @@ -207,7 +207,7 @@ namespace } catch (...) 
{ - tryLogCurrentException(&log, "Could not parse " + file_path); + tryLogCurrentException(log, "Could not parse " + file_path); return nullptr; } } @@ -560,7 +560,7 @@ bool DiskAccessStorage::rebuildLists() continue; const auto access_entity_file_path = getEntityFilePath(directory_path, id); - auto entity = tryReadEntityFile(access_entity_file_path, *getLogger()); + auto entity = tryReadEntityFile(access_entity_file_path, getLogger()); if (!entity) continue; diff --git a/src/Access/ExternalAuthenticators.cpp b/src/Access/ExternalAuthenticators.cpp index d4100c4e52..3bd891eb02 100644 --- a/src/Access/ExternalAuthenticators.cpp +++ b/src/Access/ExternalAuthenticators.cpp @@ -239,7 +239,7 @@ void ExternalAuthenticators::reset() kerberos_params.reset(); } -void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) +void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log) { std::scoped_lock lock(mutex); reset(); diff --git a/src/Access/ExternalAuthenticators.h b/src/Access/ExternalAuthenticators.h index 24f1f7b652..4f95160946 100644 --- a/src/Access/ExternalAuthenticators.h +++ b/src/Access/ExternalAuthenticators.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -30,7 +31,7 @@ class ExternalAuthenticators { public: void reset(); - void setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log); + void setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log); // The name and readiness of the credentials must be verified before calling these. 
bool checkLDAPCredentials(const String & server, const BasicCredentials & credentials, diff --git a/src/Access/GSSAcceptor.cpp b/src/Access/GSSAcceptor.cpp index a33e825f51..54edbaccaf 100644 --- a/src/Access/GSSAcceptor.cpp +++ b/src/Access/GSSAcceptor.cpp @@ -337,7 +337,7 @@ void GSSAcceptorContext::initHandles() } } -String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger * log) +String GSSAcceptorContext::processToken(const String & input_token, LoggerPtr log) { std::scoped_lock lock(gss_global_mutex); @@ -459,7 +459,7 @@ void GSSAcceptorContext::initHandles() { } -String GSSAcceptorContext::processToken(const String &, Poco::Logger *) +String GSSAcceptorContext::processToken(const String &, LoggerPtr) { throw Exception("ClickHouse was built without GSS-API/Kerberos support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); } diff --git a/src/Access/GSSAcceptor.h b/src/Access/GSSAcceptor.h index 8d207c59c0..cc1d536ff3 100644 --- a/src/Access/GSSAcceptor.h +++ b/src/Access/GSSAcceptor.h @@ -5,6 +5,7 @@ #endif #include +#include #include #include @@ -42,7 +43,7 @@ public: const String & getRealm() const; bool isFailed() const; - MAYBE_NORETURN String processToken(const String & input_token, Poco::Logger * log); + MAYBE_NORETURN String processToken(const String & input_token, LoggerPtr log); private: void reset(); diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index 275eeecacd..4872fa3f3c 100644 --- a/src/Access/IAccessStorage.cpp +++ b/src/Access/IAccessStorage.cpp @@ -538,12 +538,12 @@ UUID IAccessStorage::generateRandomID() } -Poco::Logger * IAccessStorage::getLogger() const +LoggerPtr IAccessStorage::getLogger() const { - Poco::Logger * ptr = log.load(); - if (!ptr) - log.store(ptr = &Poco::Logger::get("Access(" + storage_name + ")"), std::memory_order_relaxed); - return ptr; + callOnce(log_initialized, [&] { + log = ::getLogger("Access(" + storage_name + ")"); + }); + return log; } diff --git 
a/src/Access/IAccessStorage.h b/src/Access/IAccessStorage.h index 3565ceca70..4fa8e12764 100644 --- a/src/Access/IAccessStorage.h +++ b/src/Access/IAccessStorage.h @@ -1,6 +1,8 @@ #pragma once #include +#include +#include #include #include #include @@ -10,7 +12,6 @@ #include -namespace Poco { class Logger; } namespace Poco::Net { class IPAddress; } namespace DB @@ -178,7 +179,7 @@ protected: virtual UUID getIDOfLoggedUserImpl(const String & user_name) const; static UUID generateRandomID(); - Poco::Logger * getLogger() const; + LoggerPtr getLogger() const; static String outputEntityTypeAndName(EntityType type, const String & name) { return EntityTypeInfo::get(type).outputWithEntityName(name); } [[noreturn]] void throwNotFound(const UUID & id) const; [[noreturn]] void throwNotFound(EntityType type, const String & name) const; @@ -200,7 +201,8 @@ protected: private: const String storage_name; - mutable std::atomic log = nullptr; + mutable OnceFlag log_initialized; + mutable LoggerPtr log = nullptr; }; diff --git a/src/Access/KerberosInit.cpp b/src/Access/KerberosInit.cpp index c4919c151b..fc6f0a7fe5 100644 --- a/src/Access/KerberosInit.cpp +++ b/src/Access/KerberosInit.cpp @@ -87,7 +87,7 @@ String KerberosInit::fmtError(krb5_error_code code) const void KerberosInit::init(const String & keytab_file, const String & principal, const String & cache_name) { - auto * log = &Poco::Logger::get("KerberosInit"); + auto log = getLogger("KerberosInit"); LOG_TRACE(log,"Trying to authenticate with Kerberos v5"); krb5_error_code ret; diff --git a/src/Access/LDAPClient.cpp b/src/Access/LDAPClient.cpp index c12b0f22ab..7e1bf2acd0 100644 --- a/src/Access/LDAPClient.cpp +++ b/src/Access/LDAPClient.cpp @@ -475,7 +475,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params) for (std::size_t i = 0; referrals[i]; i++) { - LOG_WARNING(&Poco::Logger::get("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]); + 
LOG_WARNING(getLogger("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]); } } diff --git a/src/Access/RowPolicyCache.cpp b/src/Access/RowPolicyCache.cpp index c2fa18b650..6d062c903c 100644 --- a/src/Access/RowPolicyCache.cpp +++ b/src/Access/RowPolicyCache.cpp @@ -105,7 +105,7 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_) catch (...) { tryLogCurrentException( - &Poco::Logger::get("RowPolicy"), + getLogger("RowPolicy"), String("Could not parse the condition ") + toString(type) + " of row policy " + backQuote(policy->getName())); } diff --git a/src/Access/SaslClient.cpp b/src/Access/SaslClient.cpp index b36c3fcb52..6f610207f7 100644 --- a/src/Access/SaslClient.cpp +++ b/src/Access/SaslClient.cpp @@ -45,7 +45,7 @@ namespace SaslCommon static int saslLogCallbacks(void * context, int level, const char * message) { String auth_context = reinterpret_cast(context); - auto * log = &Poco::Logger::get(auth_context); + auto log = getLogger(auth_context); const String auth_message(message); if (!message) @@ -179,7 +179,7 @@ void SaslClient::saslInit(sasl_callback_t * callbacks) { if (SaslCommon::sasl_inited) { - LOG_WARNING(&Poco::Logger::get("SaslClient"), "Sasl Client is already Init"); + LOG_WARNING(getLogger("SaslClient"), "Sasl Client is already Init"); return; } int result = sasl_client_init(callbacks); diff --git a/src/Advisor/Advisor.h b/src/Advisor/Advisor.h index 7195b17499..184dd8c0a2 100644 --- a/src/Advisor/Advisor.h +++ b/src/Advisor/Advisor.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -24,7 +25,7 @@ public: static WorkloadAdvisors getAdvisors(ASTAdviseQuery::AdvisorType type); ASTAdviseQuery::AdvisorType type; - Poco::Logger * log = &Poco::Logger::get("Advisor"); + LoggerPtr log = getLogger("Advisor"); }; } diff --git a/src/Advisor/Rules/ClusterKeyAdvise.h b/src/Advisor/Rules/ClusterKeyAdvise.h index 0a966f71c6..7a95002b19 100644 --- 
a/src/Advisor/Rules/ClusterKeyAdvise.h +++ b/src/Advisor/Rules/ClusterKeyAdvise.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -17,7 +18,7 @@ public: private: bool isValidColumn(const QualifiedColumnName & column, AdvisorContext & context) const; - Poco::Logger * log = &Poco::Logger::get("ClusterKeyAdvisor"); + LoggerPtr log = getLogger("ClusterKeyAdvisor"); }; } diff --git a/src/Advisor/Rules/MaterializedViewAdvise.h b/src/Advisor/Rules/MaterializedViewAdvise.h index 569768aff1..bf25fbffeb 100644 --- a/src/Advisor/Rules/MaterializedViewAdvise.h +++ b/src/Advisor/Rules/MaterializedViewAdvise.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -53,7 +54,7 @@ private: const OutputType output_type; const bool only_aggregate; const bool ignore_filter; - Poco::Logger * log = &Poco::Logger::get("MaterializedViewAdvisor"); + LoggerPtr log = getLogger("MaterializedViewAdvisor"); }; /** diff --git a/src/Advisor/Rules/PartitionKeyAdvise.cpp b/src/Advisor/Rules/PartitionKeyAdvise.cpp index 4a7dce2343..f5f72c3c42 100644 --- a/src/Advisor/Rules/PartitionKeyAdvise.cpp +++ b/src/Advisor/Rules/PartitionKeyAdvise.cpp @@ -185,7 +185,7 @@ private: double optimal_cost; std::unordered_set explored; - Poco::Logger * log = &Poco::Logger::get("PartitionKeyAdvisor"); + LoggerPtr log = getLogger("PartitionKeyAdvisor"); }; diff --git a/src/Advisor/Rules/PartitionKeyAdvise.h b/src/Advisor/Rules/PartitionKeyAdvise.h index 3e3606cd58..05105f521c 100644 --- a/src/Advisor/Rules/PartitionKeyAdvise.h +++ b/src/Advisor/Rules/PartitionKeyAdvise.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -27,7 +28,7 @@ private: std::vector getSortedInterestingColumns(AdvisorContext & context) const; bool isValidColumn(const QualifiedColumnName & column, AdvisorContext & context) const; - Poco::Logger * log = &Poco::Logger::get("PartitionKeyAdvisor"); + LoggerPtr log = getLogger("PartitionKeyAdvisor"); static constexpr bool 
enable_memo_based_advise = 1; }; diff --git a/src/Advisor/WorkloadQuery.cpp b/src/Advisor/WorkloadQuery.cpp index 8396cfe2fe..bd161345b4 100644 --- a/src/Advisor/WorkloadQuery.cpp +++ b/src/Advisor/WorkloadQuery.cpp @@ -134,7 +134,7 @@ WorkloadQueries WorkloadQuery::build(const std::vector & queries, c setThreadName("BuildQuery"); if (thread_group) CurrentThread::attachToIfDetached(thread_group); - LOG_DEBUG(&Poco::Logger::get("WorkloadQuery"), "start building query {}", i); + LOG_DEBUG(getLogger("WorkloadQuery"), "start building query {}", i); const auto & query = queries[i]; try { @@ -142,7 +142,7 @@ WorkloadQueries WorkloadQuery::build(const std::vector & queries, c res[i] = std::move(workload_query); } catch (Exception & e) { - LOG_WARNING(&Poco::Logger::get("WorkloadQuery"), + LOG_WARNING(getLogger("WorkloadQuery"), "failed to build query, reason: {}, sql: {}", e.message(), query); } @@ -150,7 +150,7 @@ WorkloadQueries WorkloadQuery::build(const std::vector & queries, c } query_thread_pool.wait(); res.erase(std::remove(res.begin(), res.end(), nullptr), res.end()); - LOG_DEBUG(&Poco::Logger::get("WorkloadQuery"), "built queries {}/{}", res.size(), queries.size()); + LOG_DEBUG(getLogger("WorkloadQuery"), "built queries {}/{}", res.size(), queries.size()); return res; } diff --git a/src/Advisor/WorkloadTableStats.cpp b/src/Advisor/WorkloadTableStats.cpp index 35592a5583..05ef985e8f 100644 --- a/src/Advisor/WorkloadTableStats.cpp +++ b/src/Advisor/WorkloadTableStats.cpp @@ -42,7 +42,7 @@ WorkloadTableStats WorkloadTableStats::build(ContextPtr context, const String & collector.readAllFromCatalog(); basic_stats = collector.toPlanNodeStatistics().value_or(nullptr); if (basic_stats) - LOG_DEBUG(&Poco::Logger::get("WorkloadTableStats"), "Stats for table {}.{}: {} rows, {} symbols", + LOG_DEBUG(getLogger("WorkloadTableStats"), "Stats for table {}.{}: {} rows, {} symbols", database_name, table_name, basic_stats->getRowCount(), basic_stats->getSymbolStatistics().size()); 
} catch (...) {} @@ -80,7 +80,7 @@ WorkloadExtendedStatsPtr WorkloadTableStats::collectExtendedStats( query.pop_back(); query += fmt::format(" FROM {}.{}", database, table); - LOG_DEBUG(&Poco::Logger::get("WorkloadTableStats"), "Collecting extended stats for table: {}", query); + LOG_DEBUG(getLogger("WorkloadTableStats"), "Collecting extended stats for table: {}", query); Statistics::SubqueryHelper subquery_helper = Statistics::SubqueryHelper::create(context, query); Block result = Statistics::getOnlyRowFrom(subquery_helper); diff --git a/src/AggregateFunctions/AggregateFunctionPathSplit.h b/src/AggregateFunctions/AggregateFunctionPathSplit.h index 72d43f08d6..2319e8d19d 100644 --- a/src/AggregateFunctions/AggregateFunctionPathSplit.h +++ b/src/AggregateFunctions/AggregateFunctionPathSplit.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -124,7 +125,7 @@ struct AggregateFunctionPathSplitData s += "Event(index=" + std::to_string(events[i].index) + ", time=" + std::to_string(events[i].time) + ", param=" + events[i].param.toString() + ").\n"; } - LOG_DEBUG(&Poco::Logger::get("AggregateFunctionPathSplit"), "events:" + s + "."); + LOG_DEBUG(getLogger("AggregateFunctionPathSplit"), "events:" + s + "."); } }; diff --git a/src/AggregateFunctions/AggregateFunctionSessionAnalysis.h b/src/AggregateFunctions/AggregateFunctionSessionAnalysis.h index 376e1ce10f..b644fe5ca9 100644 --- a/src/AggregateFunctions/AggregateFunctionSessionAnalysis.h +++ b/src/AggregateFunctions/AggregateFunctionSessionAnalysis.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -125,7 +126,7 @@ struct AggregateFunctionSessionAnalysisData String s = "Event size: " + std::to_string(events.size()) + "\n"; for (const auto & event : events) s += "Event(type=" + std::to_string(event.type) + ", time=" + std::to_string(event.time) + ", value=" + event.value.toString() + ")\n"; - LOG_DEBUG(&Poco::Logger::get("AggregateFunctionSessionAnalysis"), "events:" + s + "."); 
+ LOG_DEBUG(getLogger("AggregateFunctionSessionAnalysis"), "events:" + s + "."); } }; diff --git a/src/AggregateFunctions/AggregateFunctionSessionSplit.h b/src/AggregateFunctions/AggregateFunctionSessionSplit.h index 4703e4f69e..7fdf15ad24 100644 --- a/src/AggregateFunctions/AggregateFunctionSessionSplit.h +++ b/src/AggregateFunctions/AggregateFunctionSessionSplit.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -146,7 +147,7 @@ struct AggregateFunctionSessionSplitData << "Sorted " << std::to_string(size) << " rows SessionEvent data." << " in " << elapsed << " sec."; - LOG_TRACE(&Poco::Logger::get(__PRETTY_FUNCTION__), log_helper.str()); + LOG_TRACE(getLogger(__PRETTY_FUNCTION__), log_helper.str()); sorted = true; } @@ -195,7 +196,7 @@ struct AggregateFunctionSessionSplitData << " in " << elapsed << " sec." << " (" << other.events.size() / elapsed << " rows/sec.)"; - LOG_TRACE(&Poco::Logger::get(__PRETTY_FUNCTION__), log_helper.str()); + LOG_TRACE(getLogger(__PRETTY_FUNCTION__), log_helper.str()); } void serialize(WriteBuffer & buf) const diff --git a/src/Analyzers/Analysis.h b/src/Analyzers/Analysis.h index cbe3fd6184..b67bcd3055 100644 --- a/src/Analyzers/Analysis.h +++ b/src/Analyzers/Analysis.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -243,7 +244,7 @@ using ListMultimap = std::unordered_map>; struct Analysis { ScopeFactory scope_factory; - Poco::Logger * logger = &Poco::Logger::get("Analysis"); + LoggerPtr logger = getLogger("Analysis"); /// Scopes // Regular scopes in an ASTSelectQuery, kept by below convention: diff --git a/src/Analyzers/QueryAnalyzer.cpp b/src/Analyzers/QueryAnalyzer.cpp index 61ccd25676..b2fbc466be 100644 --- a/src/Analyzers/QueryAnalyzer.cpp +++ b/src/Analyzers/QueryAnalyzer.cpp @@ -145,7 +145,7 @@ private: const bool enable_subcolumn_optimization_through_union; const bool enable_implicit_arg_type_convert; // MySQL implicit cast rules - Poco::Logger * logger = 
&Poco::Logger::get("QueryAnalyzerVisitor"); + LoggerPtr logger = getLogger("QueryAnalyzerVisitor"); void analyzeSetOperation(ASTPtr & node, ASTs & selects); diff --git a/src/Analyzers/QueryRewriter.cpp b/src/Analyzers/QueryRewriter.cpp index cf5040d1ec..e96d54715b 100644 --- a/src/Analyzers/QueryRewriter.cpp +++ b/src/Analyzers/QueryRewriter.cpp @@ -548,7 +548,7 @@ namespace ASTPtr QueryRewriter::rewrite(ASTPtr query, ContextMutablePtr context, bool enable_materialized_view) { - const auto * logger = &Poco::Logger::get("QueryRewriter"); + const auto logger = getLogger("QueryRewriter"); (void) enable_materialized_view; graphviz_index = GraphvizPrinter::PRINT_AST_INDEX; diff --git a/src/Analyzers/RewriteFusionMerge.cpp b/src/Analyzers/RewriteFusionMerge.cpp index 8f1b053a1b..678661217e 100644 --- a/src/Analyzers/RewriteFusionMerge.cpp +++ b/src/Analyzers/RewriteFusionMerge.cpp @@ -295,7 +295,7 @@ void RewriteFusionMerge::visit(ASTTableExpression & table_expr, ASTPtr &) table_expr.children.push_back(table_expr.subquery); LOG_DEBUG( - &Poco::Logger::get("RewriteFusionMerge"), "Rewrite {} to {}", serializeAST(*table_func_ptr), serializeAST(*select_union_query)); + getLogger("RewriteFusionMerge"), "Rewrite {} to {}", serializeAST(*table_func_ptr), serializeAST(*select_union_query)); } } diff --git a/src/Bridge/IBridge.cpp b/src/Bridge/IBridge.cpp index bd930ea6e4..7f9fd493ec 100644 --- a/src/Bridge/IBridge.cpp +++ b/src/Bridge/IBridge.cpp @@ -54,7 +54,7 @@ namespace ErrorCodes namespace { - Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) + Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, LoggerPtr log) { Poco::Net::SocketAddress socket_address; try @@ -78,7 +78,7 @@ namespace return socket_address; } - Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log) + Poco::Net::SocketAddress 
socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, LoggerPtr log) { auto address = makeSocketAddress(host, port, log); #if POCO_VERSION < 0x01080000 @@ -190,7 +190,7 @@ void IBridge::initialize(Application & self) BaseDaemon::logRevision(); - log = &logger(); + log = getLogger(logger()); hostname = config().getString("listen-host", "127.0.0.1"); port = config().getUInt("http-port"); if (port > 0xFFFF) diff --git a/src/Bridge/IBridge.h b/src/Bridge/IBridge.h index c64003d995..8525141b72 100644 --- a/src/Bridge/IBridge.h +++ b/src/Bridge/IBridge.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -46,6 +47,6 @@ private: size_t max_server_connections; size_t http_timeout; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/BridgeHelper/IBridgeHelper.h b/src/BridgeHelper/IBridgeHelper.h index feac49d091..7b6c3e4519 100644 --- a/src/BridgeHelper/IBridgeHelper.h +++ b/src/BridgeHelper/IBridgeHelper.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -54,7 +55,7 @@ protected: virtual const Poco::Util::AbstractConfiguration & getConfig() const = 0; - virtual Poco::Logger * getLog() const = 0; + virtual LoggerPtr getLog() const = 0; virtual Poco::Timespan getHTTPTimeout() const = 0; diff --git a/src/BridgeHelper/LibraryBridgeHelper.cpp b/src/BridgeHelper/LibraryBridgeHelper.cpp index d4b2d57ad6..3f3f44700f 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.cpp +++ b/src/BridgeHelper/LibraryBridgeHelper.cpp @@ -8,7 +8,7 @@ namespace DB LibraryBridgeHelper::LibraryBridgeHelper(ContextPtr context_) : IBridgeHelper(context_) , config(context_->getConfigRef()) - , log(&Poco::Logger::get("LibraryBridgeHelper")) + , log(getLogger("LibraryBridgeHelper")) , http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value) , bridge_host(config.getString("library_bridge.host", DEFAULT_HOST)) , bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT)) diff --git 
a/src/BridgeHelper/LibraryBridgeHelper.h b/src/BridgeHelper/LibraryBridgeHelper.h index 77872f8684..b2b867b484 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.h +++ b/src/BridgeHelper/LibraryBridgeHelper.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -31,7 +32,7 @@ protected: const Poco::Util::AbstractConfiguration & getConfig() const override { return config; } - Poco::Logger * getLog() const override { return log; } + LoggerPtr getLog() const override { return log; } Poco::Timespan getHTTPTimeout() const override { return http_timeout; } @@ -40,7 +41,7 @@ protected: static constexpr inline size_t DEFAULT_PORT = 9012; const Poco::Util::AbstractConfiguration & config; - Poco::Logger * log; + LoggerPtr log; const Poco::Timespan http_timeout; std::string bridge_host; size_t bridge_port; diff --git a/src/BridgeHelper/XDBCBridgeHelper.h b/src/BridgeHelper/XDBCBridgeHelper.h index 9033467b7c..79315e9e1e 100644 --- a/src/BridgeHelper/XDBCBridgeHelper.h +++ b/src/BridgeHelper/XDBCBridgeHelper.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -67,7 +68,7 @@ public: Poco::Timespan http_timeout_, const std::string & connection_string_) : IXDBCBridgeHelper(context_->getGlobalContext()) - , log(&Poco::Logger::get(BridgeHelperMixin::getName() + "BridgeHelper")) + , log(getLogger(BridgeHelperMixin::getName() + "BridgeHelper")) , connection_string(connection_string_) , http_timeout(http_timeout_) , config(context_->getGlobalContext()->getConfigRef()) @@ -122,7 +123,7 @@ protected: const Poco::Util::AbstractConfiguration & getConfig() const override { return config; } - Poco::Logger * getLog() const override { return log; } + LoggerPtr getLog() const override { return log; } bool startBridgeManually() const override { return BridgeHelperMixin::startBridgeManually(); } @@ -144,7 +145,7 @@ protected: private: using Configuration = Poco::Util::AbstractConfiguration; - Poco::Logger * log; + LoggerPtr log; std::string connection_string; 
Poco::Timespan http_timeout; std::string bridge_host; diff --git a/src/Catalog/Catalog.cpp b/src/Catalog/Catalog.cpp index 8883e87589..f9de09b83b 100644 --- a/src/Catalog/Catalog.cpp +++ b/src/Catalog/Catalog.cpp @@ -4096,7 +4096,7 @@ namespace Catalog return txn_undobuffers; } - Catalog::UndoBufferIterator::UndoBufferIterator(IMetaStore::IteratorPtr metastore_iter_, Poco::Logger * log_) + Catalog::UndoBufferIterator::UndoBufferIterator(IMetaStore::IteratorPtr metastore_iter_, LoggerPtr log_) : metastore_iter{std::move(metastore_iter_)}, log{log_} {} @@ -7340,7 +7340,7 @@ namespace Catalog void notifyOtherServersOnAccessEntityChange(const Context & context, EntityType type, const String & name, const UUID & uuid) { - static Poco::Logger * log = &Poco::Logger::get("Catalog::notifyOtherServersOnAccessEntityChange"); + static LoggerPtr log = getLogger("Catalog::notifyOtherServersOnAccessEntityChange"); std::shared_ptr topology_master = context.getCnchTopologyMaster(); if (!topology_master) { @@ -7770,7 +7770,7 @@ namespace Catalog } catch (...) 
{ - tryLogCurrentException(&Poco::Logger::get("Catalog::getLastModificationTimeHints")); + tryLogCurrentException(getLogger("Catalog::getLastModificationTimeHints")); } } diff --git a/src/Catalog/Catalog.h b/src/Catalog/Catalog.h index 14e1d005c6..e4bd61091b 100644 --- a/src/Catalog/Catalog.h +++ b/src/Catalog/Catalog.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -506,7 +507,7 @@ public: class UndoBufferIterator { public: - UndoBufferIterator(IMetaStore::IteratorPtr metastore_iter, Poco::Logger * log); + UndoBufferIterator(IMetaStore::IteratorPtr metastore_iter, LoggerPtr log); const UndoResource & getUndoResource() const; bool next(); bool is_valid() const /// for testing @@ -517,7 +518,7 @@ public: IMetaStore::IteratorPtr metastore_iter; std::optional cur_undo_resource; bool valid = false; - Poco::Logger * log; + LoggerPtr log; }; UndoBufferIterator getUndoBufferIterator() const; @@ -918,7 +919,7 @@ public: void shutDown() {bg_task.reset();} private: - Poco::Logger * log = &Poco::Logger::get("Catalog"); + LoggerPtr log = getLogger("Catalog"); Context & context; MetastoreProxyPtr meta_proxy; const String name_space; diff --git a/src/Catalog/CatalogBackgroundTask.h b/src/Catalog/CatalogBackgroundTask.h index d252cf040e..684ab0ffaa 100644 --- a/src/Catalog/CatalogBackgroundTask.h +++ b/src/Catalog/CatalogBackgroundTask.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -27,7 +28,7 @@ private: void cleanStaleLargeKV(); - Poco::Logger * log = &Poco::Logger::get("CatalogBGTask"); + LoggerPtr log = getLogger("CatalogBGTask"); ContextPtr context; std::shared_ptr metastore; diff --git a/src/Catalog/CatalogFactory.cpp b/src/Catalog/CatalogFactory.cpp index ec11f7531a..7211929723 100644 --- a/src/Catalog/CatalogFactory.cpp +++ b/src/Catalog/CatalogFactory.cpp @@ -125,7 +125,7 @@ ASTPtr CatalogFactory::getCreateDictionaryByDataModel(const DB::Protos::DataMode } catch (Exception &) { - 
LOG_WARNING(&Poco::Logger::get("CatalogFactory"), "Dictionary create query parse failed: query {}", create_query); + LOG_WARNING(getLogger("CatalogFactory"), "Dictionary create query parse failed: query {}", create_query); throw; } diff --git a/src/Catalog/FDBClient.cpp b/src/Catalog/FDBClient.cpp index 73e37dbaa7..8ef1f193e2 100644 --- a/src/Catalog/FDBClient.cpp +++ b/src/Catalog/FDBClient.cpp @@ -77,7 +77,7 @@ static fdb_error_t RunWithRetry(FDBTransactionPtr tr, size_t max_retry, Runnable if (fdb_error_t f_code = waitFuture(f->future); f_code) return code; // continue the loop and perform the operation again. - LOG_WARNING(&Poco::Logger::get("FDBClient::RunWithRetry"), "Try perform the transaction again with retryable error : {}, remain retry time: {}", + LOG_WARNING(getLogger("FDBClient::RunWithRetry"), "Try perform the transaction again with retryable error : {}, remain retry time: {}", std::string(fdb_get_error(code)), max_retry); } } @@ -478,7 +478,7 @@ bool Iterator::Next(fdb_error_t & code) if (code == FDBError::FDB_transaction_too_old || code == FDBError::FDB_transaction_timed_out) { - LOG_DEBUG(&Poco::Logger::get("FDBIterator"), "Transaction timeout or too old, create new transaction"); + LOG_DEBUG(getLogger("FDBIterator"), "Transaction timeout or too old, create new transaction"); tr = std::make_shared(); Catalog::MetastoreFDBImpl::check_fdb_op(client->CreateTransaction(tr)); continue; diff --git a/src/Catalog/MetastoreProxy.cpp b/src/Catalog/MetastoreProxy.cpp index 1121e69b49..50da1762e1 100644 --- a/src/Catalog/MetastoreProxy.cpp +++ b/src/Catalog/MetastoreProxy.cpp @@ -553,7 +553,7 @@ String MetastoreProxy::getMvMetaVersion(const String & name_space, const String { String mv_meta_version_ts; metastore_ptr->get(matViewVersionKey(name_space, uuid), mv_meta_version_ts); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "get mv meta, version {}.", mv_meta_version_ts); + LOG_TRACE(getLogger("MetaStore"), "get mv meta, version {}.", mv_meta_version_ts); if 
(mv_meta_version_ts.empty()) return ""; @@ -565,7 +565,7 @@ BatchCommitRequest MetastoreProxy::constructMvMetaRequests(const String & name_s std::vector> drop_partitions, String mv_version_ts) { - LOG_TRACE(&Poco::Logger::get("MetaStore"), "construct mv meta, version {}.", mv_version_ts); + LOG_TRACE(getLogger("MetaStore"), "construct mv meta, version {}.", mv_version_ts); BatchCommitRequest multi_write; for (const auto & add : add_partitions) @@ -575,7 +575,7 @@ BatchCommitRequest MetastoreProxy::constructMvMetaRequests(const String & name_s add->SerializeToString(&value); String key = matViewBaseTablesKey(name_space, uuid, base_uuid, add->partition()); multi_write.AddPut(SinglePutRequest(key, value)); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "add key {} value size {}.", key, value.size()); + LOG_TRACE(getLogger("MetaStore"), "add key {} value size {}.", key, value.size()); } multi_write.AddPut(SinglePutRequest(matViewVersionKey(name_space, uuid), mv_version_ts)); @@ -587,7 +587,7 @@ BatchCommitRequest MetastoreProxy::constructMvMetaRequests(const String & name_s drop->SerializeToString(&value); String key = matViewBaseTablesKey(name_space, uuid, base_uuid, drop->partition()); multi_write.AddDelete(SinglePutRequest(key, value)); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "drop key {}.", key); + LOG_TRACE(getLogger("MetaStore"), "drop key {}.", key); } return multi_write; @@ -605,7 +605,7 @@ void MetastoreProxy::updateMvMeta(const String & name_space, const String & uuid String serialized_meta; p.SerializeToString(&serialized_meta); metastore_ptr->put(matViewBaseTablesKey(name_space, uuid, base_uuid, p.partition()), serialized_meta); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "value size {}.", serialized_meta.size()); + LOG_TRACE(getLogger("MetaStore"), "value size {}.", serialized_meta.size()); } } } @@ -1188,7 +1188,7 @@ void MetastoreProxy::createMutation(const String & name_space, const String & uu void MetastoreProxy::removeMutation(const String & 
name_space, const String & uuid, const String & mutation_name) { - LOG_TRACE(&Poco::Logger::get(__func__), "Removing mutation {}", mutation_name); + LOG_TRACE(getLogger(__func__), "Removing mutation {}", mutation_name); metastore_ptr->drop(tableMutationKey(name_space, uuid, mutation_name)); } @@ -1425,7 +1425,7 @@ void MetastoreProxy::clearIntents(const String & name_space, const String & inte auto snapshot = metastore_ptr->multiGet(intent_names); - Poco::Logger * log = &Poco::Logger::get(__func__); + LoggerPtr log = getLogger(__func__); std::vector matched_intent_index; for (size_t i = 0; i < intents.size(); i++) @@ -2816,7 +2816,7 @@ void MetastoreProxy::attachDetachedParts( { auto info_ptr = createPartInfoFromModel(parts.parts(idx).part_info()); String part_key = dataPartKey(name_space, to_uuid, info_ptr->getPartName()); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[attachDetachedParts] Write part record {}", part_key); + LOG_TRACE(getLogger("MetaStore"), "[attachDetachedParts] Write part record {}", part_key); if (!existing_partitions.contains(info_ptr->partition_id) && !partition_map.contains(info_ptr->partition_id)) { @@ -2829,7 +2829,7 @@ void MetastoreProxy::attachDetachedParts( { auto info_ptr = createPartInfoFromModel(staged_parts.parts(idx).part_info()); String staged_part_key = stagedDataPartKey(name_space, to_uuid, info_ptr->getPartName()); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[attachDetachedStagedParts] Write part record {}", staged_part_key); + LOG_TRACE(getLogger("MetaStore"), "[attachDetachedStagedParts] Write part record {}", staged_part_key); if (!existing_partitions.contains(info_ptr->partition_id) && !partition_map.contains(info_ptr->partition_id)) { @@ -2845,7 +2845,7 @@ void MetastoreProxy::attachDetachedParts( partition_model.set_id(partition_id); partition_model.set_partition_minmax(partition_minmax); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[attachDetachedParts] Write partition record {}", + LOG_TRACE(getLogger("MetaStore"), 
"[attachDetachedParts] Write partition record {}", partition_key); batch_writer.addPut(partition_key, partition_model.SerializeAsString()); @@ -2864,7 +2864,7 @@ void MetastoreProxy::attachDetachedParts( { String detached_part_key = detachedPartKey(name_space, from_uuid, detached_part_names[idx]); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[attachDetachedParts] Delete detached part record {}", + LOG_TRACE(getLogger("MetaStore"), "[attachDetachedParts] Delete detached part record {}", detached_part_key); batch_writer.addDelete(detached_part_key); @@ -2880,7 +2880,7 @@ void MetastoreProxy::attachDetachedParts( { const auto & bitmap_model_meta = bitmap_meta->getModel(); String detached_bitmap_meta_key = deleteBitmapKey(name_space, to_uuid, *bitmap_model_meta); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[attachDetachedDeleteBitmaps] Write new bitmap meta record {}", detached_bitmap_meta_key); + LOG_TRACE(getLogger("MetaStore"), "[attachDetachedDeleteBitmaps] Write new bitmap meta record {}", detached_bitmap_meta_key); batch_writer.addPut(detached_bitmap_meta_key, bitmap_model_meta->SerializeAsString()); } @@ -2893,7 +2893,7 @@ void MetastoreProxy::attachDetachedParts( for (auto & bitmap_meta: detached_bitmaps) { String detached_bitmap_meta_key = detachedDeleteBitmapKey(name_space, from_uuid, *bitmap_meta->getModel()); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[detachAttachedDeleteBitmaps] Delete detached bitmap meta record {}", detached_bitmap_meta_key); + LOG_TRACE(getLogger("MetaStore"), "[detachAttachedDeleteBitmaps] Delete detached bitmap meta record {}", detached_bitmap_meta_key); batch_writer.addDelete(detached_bitmap_meta_key); } @@ -2937,7 +2937,7 @@ void MetastoreProxy::detachAttachedParts( auto info_ptr = createPartInfoFromModel(parts[idx].value().part_info()); String detached_part_key = detachedPartKey(name_space, to_uuid, info_ptr->getPartName()); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[detachAttachedParts] Write detach part record {}", + 
LOG_TRACE(getLogger("MetaStore"), "[detachAttachedParts] Write detach part record {}", detached_part_key); batch_writer.addPut(detached_part_key, parts[idx].value().SerializeAsString()); @@ -2952,7 +2952,7 @@ void MetastoreProxy::detachAttachedParts( for (size_t idx = 0; idx < attached_part_names.size(); ++idx) { String part_key = dataPartKey(name_space, from_uuid, attached_part_names[idx]); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[detachAttachedParts] Delete part record {}", + LOG_TRACE(getLogger("MetaStore"), "[detachAttachedParts] Delete part record {}", part_key); batch_writer.addDelete(part_key); @@ -2960,7 +2960,7 @@ void MetastoreProxy::detachAttachedParts( for (size_t idx = 0; idx < attached_staged_part_names.size(); ++idx) { String part_key = stagedDataPartKey(name_space, from_uuid, attached_staged_part_names[idx]); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[detachAttachedParts] Delete staged part record {}", part_key); + LOG_TRACE(getLogger("MetaStore"), "[detachAttachedParts] Delete staged part record {}", part_key); batch_writer.addDelete(part_key); } @@ -2976,7 +2976,7 @@ void MetastoreProxy::detachAttachedParts( const auto & bitmap_model_meta = bitmap_meta->getModel(); String detached_bitmap_meta_key = detachedDeleteBitmapKey(name_space, to_uuid, *bitmap_model_meta); LOG_TRACE( - &Poco::Logger::get("MetaStore"), + getLogger("MetaStore"), "[detachAttachedDeleteBitmaps] Write detach bitmap meta record {}", detached_bitmap_meta_key); @@ -2992,7 +2992,7 @@ void MetastoreProxy::detachAttachedParts( { String detached_bitmap_meta_key = deleteBitmapKey(name_space, from_uuid, *bitmap_meta->getModel()); LOG_TRACE( - &Poco::Logger::get("MetaStore"), "[detachAttachedDeleteBitmaps] Delete bitmap meta record {}", detached_bitmap_meta_key); + getLogger("MetaStore"), "[detachAttachedDeleteBitmaps] Delete bitmap meta record {}", detached_bitmap_meta_key); batch_writer.addDelete(detached_bitmap_meta_key); } @@ -3044,7 +3044,7 @@ std::vector> 
MetastoreProxy::attachDetachedPartsRaw( else part_key = stagedDataPartKey(name_space, tbl_uuid, part_names[i]); LOG_TRACE( - &Poco::Logger::get("MetaStore"), + getLogger("MetaStore"), "[attachDetachedPartsRaw] Write {} part meta record {}", i < detached_visible_part_size ? "" : "staged ", part_key); @@ -3061,7 +3061,7 @@ std::vector> MetastoreProxy::attachDetachedPartsRaw( for (size_t i = 0; i < part_names.size(); ++i) { String detached_part_key = detachedPartKey(name_space, tbl_uuid, part_names[i]); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "[attachDetachedPartsRaw] Delete detached part record {}", + LOG_TRACE(getLogger("MetaStore"), "[attachDetachedPartsRaw] Delete detached part record {}", detached_part_key); batch_writer.addDelete(detached_part_key); @@ -3075,7 +3075,7 @@ std::vector> MetastoreProxy::attachDetachedPartsRaw( for (size_t i = 0; i < bitmap_names.size(); ++i) { String detached_bitmap_meta_key = detachedDeleteBitmapKey(name_space, tbl_uuid, bitmap_names[i]); - LOG_TRACE(&Poco::Logger::get("MS"), "[attachDetachedPartsRaw] Delete detached bitmap meta record {}", detached_bitmap_meta_key); + LOG_TRACE(getLogger("MS"), "[attachDetachedPartsRaw] Delete detached bitmap meta record {}", detached_bitmap_meta_key); batch_writer.addDelete(detached_bitmap_meta_key); } @@ -3102,7 +3102,7 @@ void MetastoreProxy::detachAttachedPartsRaw( for (const auto& [detached_part_name, detached_part_meta] : detached_part_metas) { String detached_part_key = detachedPartKey(name_space, to_uuid, detached_part_name); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "Write detached part record {} in detachAttachedPartsRaw", + LOG_TRACE(getLogger("MetaStore"), "Write detached part record {} in detachAttachedPartsRaw", detached_part_key); batch_writer.addPut(detached_part_key, detached_part_meta); @@ -3117,11 +3117,11 @@ void MetastoreProxy::detachAttachedPartsRaw( { /// We don't know whether attach a staged part or normal part, just delete both. 
String attached_part_key = dataPartKey(name_space, from_uuid, attached_part_name); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "Delete part record {} in detachAttachedPartsRaw", + LOG_TRACE(getLogger("MetaStore"), "Delete part record {} in detachAttachedPartsRaw", attached_part_key); String attached_staged_part_key = stagedDataPartKey(name_space, from_uuid, attached_part_name); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "Delete staged part record {} in detachAttachedPartsRaw", attached_staged_part_key); + LOG_TRACE(getLogger("MetaStore"), "Delete staged part record {} in detachAttachedPartsRaw", attached_staged_part_key); batch_writer.addDelete(attached_part_key); batch_writer.addDelete(attached_staged_part_key); @@ -3135,7 +3135,7 @@ void MetastoreProxy::detachAttachedPartsRaw( for (const auto & [detached_bitmap_name, detached_bitmap_meta] : detached_bitmap_metas) { String detached_bitmap_key = detachedDeleteBitmapKey(name_space, to_uuid, detached_bitmap_name); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "Write detached bitmap record {} in detachAttachedPartsRaw", detached_bitmap_key); + LOG_TRACE(getLogger("MetaStore"), "Write detached bitmap record {} in detachAttachedPartsRaw", detached_bitmap_key); batch_writer.addPut(detached_bitmap_key, detached_bitmap_meta); } @@ -3148,7 +3148,7 @@ void MetastoreProxy::detachAttachedPartsRaw( for (const String & attached_bitmap_name : attached_bitmap_names) { String attached_bitmap_key = deleteBitmapKey(name_space, from_uuid, attached_bitmap_name); - LOG_TRACE(&Poco::Logger::get("MetaStore"), "Delete bitmap record {} in detachAttachedPartsRaw", attached_bitmap_key); + LOG_TRACE(getLogger("MetaStore"), "Delete bitmap record {} in detachAttachedPartsRaw", attached_bitmap_key); batch_writer.addDelete(attached_bitmap_key); } @@ -3244,7 +3244,7 @@ bool MetastoreProxy::resetObjectAssembledSchemaAndPurgePartialSchemas( const SerializedObjectSchema & new_assembled_schema, const std::vector & partial_schema_txnids) { - 
Poco::Logger * log = &Poco::Logger::get(__func__); + LoggerPtr log = getLogger(__func__); BatchCommitRequest batch_write; bool if_not_exists = false; @@ -3348,7 +3348,7 @@ Strings MetastoreProxy::removePartitions(const String & name_space, const String auto partitions_meta = metastore_ptr->multiGet(request_keys); - Poco::Logger * log = &Poco::Logger::get(__func__); + LoggerPtr log = getLogger(__func__); Strings res; // try commit all partitions with CAS in one batch diff --git a/src/Catalog/StreamingHanlders.h b/src/Catalog/StreamingHanlders.h index a0c536572e..3b6486bae5 100644 --- a/src/Catalog/StreamingHanlders.h +++ b/src/Catalog/StreamingHanlders.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include #include @@ -38,7 +39,7 @@ public: virtual void on_closed(brpc::StreamId) override; - Poco::Logger * log = &Poco::Logger::get("StreamingHandler"); + LoggerPtr log = getLogger("StreamingHandler"); HandlerManager & manager; HandlerIterator handler_it; }; diff --git a/src/Client/Connection.h b/src/Client/Connection.h index 8ad348c71d..624929b828 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -21,6 +21,7 @@ #pragma once +#include #include "common/types.h" #include @@ -28,6 +29,7 @@ #include #include +#include #if !defined(ARCADIA_BUILD) # include #endif @@ -364,16 +366,17 @@ private: { } - Poco::Logger * get() + LoggerPtr get() { - if (!log) - log = &Poco::Logger::get("Connection (" + parent.getDescription() + ")"); - + callOnce(log_initialized, [&] { + log = getLogger("Connection (" + parent.getDescription() + ")"); + }); return log; } private: - std::atomic log; + mutable OnceFlag log_initialized; + LoggerPtr log; Connection & parent; }; diff --git a/src/Client/ConnectionEstablisher.cpp b/src/Client/ConnectionEstablisher.cpp index 4d27c9efc6..42bcb02867 100644 --- a/src/Client/ConnectionEstablisher.cpp +++ b/src/Client/ConnectionEstablisher.cpp @@ -22,7 +22,7 @@ ConnectionEstablisher::ConnectionEstablisher( IConnectionPool * 
pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log_, + LoggerPtr log_, const QualifiedTableName * table_to_check_) : pool(pool_), timeouts(timeouts_), settings(settings_), log(log_), table_to_check(table_to_check_), is_finished(false) { @@ -109,7 +109,7 @@ ConnectionEstablisherAsync::ConnectionEstablisherAsync( IConnectionPool * pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log_, + LoggerPtr log_, const QualifiedTableName * table_to_check_) : connection_establisher(pool_, timeouts_, settings_, log_, table_to_check_) { diff --git a/src/Client/ConnectionEstablisher.h b/src/Client/ConnectionEstablisher.h index 495583ba7e..22244ed742 100644 --- a/src/Client/ConnectionEstablisher.h +++ b/src/Client/ConnectionEstablisher.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -22,7 +23,7 @@ public: ConnectionEstablisher(IConnectionPool * pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log, + LoggerPtr log, const QualifiedTableName * table_to_check = nullptr); /// Establish connection and save it in result, write possible exception message in fail_message. @@ -37,7 +38,7 @@ private: IConnectionPool * pool; const ConnectionTimeouts * timeouts; const Settings * settings; - Poco::Logger * log; + LoggerPtr log; const QualifiedTableName * table_to_check; bool is_finished; @@ -61,7 +62,7 @@ public: ConnectionEstablisherAsync(IConnectionPool * pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log_, + LoggerPtr log_, const QualifiedTableName * table_to_check = nullptr); /// Resume establishing connection. 
If the process was not finished, diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h index 09c0c7995a..0093999c0e 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -85,7 +86,7 @@ public: UInt16 rpc_port_ = 0, String worker_id_ = "virtual_id") : Base(max_connections_, - &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")), + getLogger("ConnectionPool (" + host_ + ":" + toString(port_) + ")")), host(host_), port(port_), default_database(default_database_), diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index 8c39070521..e5f01d86db 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -29,7 +29,7 @@ ConnectionPoolWithFailover::ConnectionPoolWithFailover( LoadBalancing load_balancing, time_t decrease_error_period_, size_t max_error_cap_) - : Base(std::move(nested_pools_), decrease_error_period_, max_error_cap_, &Poco::Logger::get("ConnectionPoolWithFailover")) + : Base(std::move(nested_pools_), decrease_error_period_, max_error_cap_, getLogger("ConnectionPoolWithFailover")) , default_load_balancing(load_balancing) { const std::string & local_hostname = getFQDNOrHostName(); diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index 5a42cc0d5c..eef893e158 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -24,7 +24,7 @@ HedgedConnectionsFactory::HedgedConnectionsFactory( const Settings * settings_, const ConnectionTimeouts & timeouts_, std::shared_ptr table_to_check_) - : pool(pool_), settings(settings_), timeouts(timeouts_), table_to_check(table_to_check_), log(&Poco::Logger::get("HedgedConnectionsFactory")) + : pool(pool_), settings(settings_), timeouts(timeouts_), table_to_check(table_to_check_), 
log(getLogger("HedgedConnectionsFactory")) { shuffled_pools = pool->getShuffledPools(settings); for (auto shuffled_pool : shuffled_pools) diff --git a/src/Client/HedgedConnectionsFactory.h b/src/Client/HedgedConnectionsFactory.h index c5e8d493ef..61a0e39050 100644 --- a/src/Client/HedgedConnectionsFactory.h +++ b/src/Client/HedgedConnectionsFactory.h @@ -2,6 +2,7 @@ #if defined(OS_LINUX) +#include #include #include #include @@ -129,7 +130,7 @@ private: int last_used_index = -1; bool fallback_to_stale_replicas; Epoll epoll; - Poco::Logger * log; + LoggerPtr log; std::string fail_messages; /// The maximum number of attempts to connect to replicas. diff --git a/src/CloudServices/CloudMergeTreeDedupWorker.cpp b/src/CloudServices/CloudMergeTreeDedupWorker.cpp index dcee4caa8a..270fc7a5db 100644 --- a/src/CloudServices/CloudMergeTreeDedupWorker.cpp +++ b/src/CloudServices/CloudMergeTreeDedupWorker.cpp @@ -52,7 +52,7 @@ CloudMergeTreeDedupWorker::CloudMergeTreeDedupWorker(StorageCloudMergeTree & sto : storage(storage_) , context(storage.getContext()) , log_name(storage.getLogName() + "(DedupWorker)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) , interval_scheduler(storage.getSettings()->staged_part_lifetime_threshold_ms_to_block_kafka_consume) { task = storage.getContext()->getUniqueTableSchedulePool().createTask(log_name, [this] { run(); }); diff --git a/src/CloudServices/CloudMergeTreeDedupWorker.h b/src/CloudServices/CloudMergeTreeDedupWorker.h index 89c4895047..8db7cd0893 100644 --- a/src/CloudServices/CloudMergeTreeDedupWorker.h +++ b/src/CloudServices/CloudMergeTreeDedupWorker.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -128,7 +129,7 @@ private: else /// idle area ratio = 1.5; LOG_DEBUG( - &Poco::Logger::get("TaskIntervalScheduler"), + getLogger("TaskIntervalScheduler"), "min staged part timestamp: {} ms, current timestamp: {} ms, final ratio is: {}, current sleep time: {} ms.", mts, 
current_timestamp.toMillisecond(), @@ -154,7 +155,7 @@ private: std::atomic index{0}; ContextMutablePtr context; String log_name; - Poco::Logger * log; + LoggerPtr log; BackgroundSchedulePool::TaskHolder task; TaskIntervalScheduler interval_scheduler; std::atomic is_stopped{false}; diff --git a/src/CloudServices/CnchBGThreadPartitionSelector.cpp b/src/CloudServices/CnchBGThreadPartitionSelector.cpp index 29edcefa17..993aef7b91 100644 --- a/src/CloudServices/CnchBGThreadPartitionSelector.cpp +++ b/src/CloudServices/CnchBGThreadPartitionSelector.cpp @@ -26,7 +26,7 @@ HAVING insert_parts > 0; )"; CnchBGThreadPartitionSelector::CnchBGThreadPartitionSelector(ContextMutablePtr global_context_) -: WithMutableContext(global_context_), log(&Poco::Logger::get("PartitionSelector")) +: WithMutableContext(global_context_), log(getLogger("PartitionSelector")) { try { diff --git a/src/CloudServices/CnchBGThreadPartitionSelector.h b/src/CloudServices/CnchBGThreadPartitionSelector.h index f70e75a969..e01a819a69 100644 --- a/src/CloudServices/CnchBGThreadPartitionSelector.h +++ b/src/CloudServices/CnchBGThreadPartitionSelector.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -132,7 +133,7 @@ private: bool needRoundRobinPick(const StoragePtr & storage, Type type, size_t & out_n_suggestion); Strings doRoundRobinPick(const StoragePtr & storage, Type type, size_t n); - Poco::Logger * log; + LoggerPtr log; /// Whether loading digest information from system.server_part_log successful. 
bool load_success = true; diff --git a/src/CloudServices/CnchBGThreadsMap.cpp b/src/CloudServices/CnchBGThreadsMap.cpp index 00532b7e4c..f7b3e43fb6 100644 --- a/src/CloudServices/CnchBGThreadsMap.cpp +++ b/src/CloudServices/CnchBGThreadsMap.cpp @@ -153,7 +153,7 @@ CnchBGThreadPtr CnchBGThreadsMap::startThread(const StorageID & storage_id) { // Create new MergeThread but not start it, // to prevent daemon_manager send duplicate startMergeThread request - auto * log = &Poco::Logger::get("CnchBGThreadsMap"); + auto log = getLogger("CnchBGThreadsMap"); LOG_DEBUG(log, "Cancel start MergeThread for table {}, since table on the blacklist.", storage_id.getNameForLogs()); } else @@ -171,7 +171,7 @@ void CnchBGThreadsMap::tryRemoveThread(const StorageID & storage_id) auto t = tryGetThread(storage_id); if (!t) { - LOG_DEBUG(&Poco::Logger::get("CnchBGThreadsMap"), "{} for {} not found", toString(type), storage_id.getNameForLogs()); + LOG_DEBUG(getLogger("CnchBGThreadsMap"), "{} for {} not found", toString(type), storage_id.getNameForLogs()); return; } @@ -184,7 +184,7 @@ void CnchBGThreadsMap::tryDropThread(const StorageID & storage_id) auto t = tryGetThread(storage_id); if (!t) { - LOG_DEBUG(&Poco::Logger::get("CnchBGThreadsMap"), "{} for {} not found", toString(type), storage_id.getNameForLogs()); + LOG_DEBUG(getLogger("CnchBGThreadsMap"), "{} for {} not found", toString(type), storage_id.getNameForLogs()); return; } @@ -222,7 +222,7 @@ void CnchBGThreadsMap::cleanup() if (it->second->error()) { LOG_WARNING( - &Poco::Logger::get("CnchBGThreadsMap"), + getLogger("CnchBGThreadsMap"), "{} for {} got error, remove it", toString(type), it->second->getStorageID().getNameForLogs()); diff --git a/src/CloudServices/CnchDataWriter.cpp b/src/CloudServices/CnchDataWriter.cpp index b067a01c54..5c05e999c4 100644 --- a/src/CloudServices/CnchDataWriter.cpp +++ b/src/CloudServices/CnchDataWriter.cpp @@ -544,7 +544,7 @@ void CnchDataWriter::commitPreparedCnchParts(const DumpedData & 
dumped_data, con if (context->getServerType() != ServerType::cnch_server) throw Exception(ErrorCodes::LOGICAL_ERROR, "Must be called in Server mode: {}", context->getServerType()); - auto * log = storage.getLogger(); + auto log = storage.getLogger(); auto txn = context->getCurrentTransaction(); auto txn_id = txn->getTransactionID(); /// set main table uuid in server side diff --git a/src/CloudServices/CnchPartsHelper.cpp b/src/CloudServices/CnchPartsHelper.cpp index 943bd6a7c4..cf2aa465b7 100644 --- a/src/CloudServices/CnchPartsHelper.cpp +++ b/src/CloudServices/CnchPartsHelper.cpp @@ -489,9 +489,9 @@ namespace if (!partition_sorted || !partition_aligned) { if (!partition_sorted) - LOG_WARNING(&Poco::Logger::get(__func__), "parts are not partition sorted, this could make calcVisible slow"); + LOG_WARNING(getLogger(__func__), "parts are not partition sorted, this could make calcVisible slow"); else if (partition_ids.size() > 1) - LOG_WARNING(&Poco::Logger::get(__func__), "parts are not partition aligned, this could make calcVisible slow"); + LOG_WARNING(getLogger(__func__), "parts are not partition aligned, this could make calcVisible slow"); process_parts(all_parts, 0, all_parts.size(), visible_parts); } else @@ -531,7 +531,7 @@ namespace if (logging == EnableLogging) { - auto log = &Poco::Logger::get(__func__); + auto log = getLogger(__func__); LOG_DEBUG(log, "all_parts:\n {}", partsToDebugString(all_parts)); LOG_DEBUG(log, "visible_parts (skip_drop_ranges={}):\n{}", skip_drop_ranges, partsToDebugString(visible_parts)); if (visible_alone_drop_ranges) diff --git a/src/CloudServices/CnchServerClient.cpp b/src/CloudServices/CnchServerClient.cpp index 2c64d286e5..94256fa6aa 100644 --- a/src/CloudServices/CnchServerClient.cpp +++ b/src/CloudServices/CnchServerClient.cpp @@ -673,7 +673,7 @@ void CnchServerClient::precommitParts( size_t staged_part_batch_begin = std::min(batch_begin, staged_parts.size()); size_t staged_part_batch_end = std::min(batch_end, 
staged_parts.size()); - Poco::Logger * log = &Poco::Logger::get(__func__); + LoggerPtr log = getLogger(__func__); LOG_DEBUG( log, "Precommit: parts in batch: [{} ~ {}] of total: {}; delete_bitmaps in batch [{} ~ {}] of total {}; staged parts in batch [{} " @@ -783,7 +783,7 @@ void CnchServerClient::cleanTransaction(const TransactionRecord & txn_record) Protos::CleanTransactionReq request; Protos::CleanTransactionResp response; - LOG_DEBUG(&Poco::Logger::get(__func__), "clean txn: [{}] on server: {}", txn_record.toString(), getRPCAddress()); + LOG_DEBUG(getLogger(__func__), "clean txn: [{}] on server: {}", txn_record.toString(), getRPCAddress()); request.mutable_txn_record()->CopyFrom(txn_record.pb_model); stub->cleanTransaction(&cntl, &request, &response, nullptr); @@ -798,7 +798,7 @@ void CnchServerClient::cleanUndoBuffers(const TransactionRecord & txn_record) Protos::CleanUndoBuffersReq request; Protos::CleanUndoBuffersResp response; - LOG_DEBUG(&Poco::Logger::get(__func__), "clean undo buffers for txn: [{}] on server: {}", txn_record.toString(), getRPCAddress()); + LOG_DEBUG(getLogger(__func__), "clean undo buffers for txn: [{}] on server: {}", txn_record.toString(), getRPCAddress()); request.mutable_txn_record()->CopyFrom(txn_record.pb_model); stub->cleanUndoBuffers(&cntl, &request, &response, nullptr); diff --git a/src/CloudServices/CnchServerResource.h b/src/CloudServices/CnchServerResource.h index 728928878a..e6337f0057 100644 --- a/src/CloudServices/CnchServerResource.h +++ b/src/CloudServices/CnchServerResource.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -139,7 +140,7 @@ class CnchServerResource { public: explicit CnchServerResource(TxnTimestamp curr_txn_id) - : txn_id(curr_txn_id), log(&Poco::Logger::get("ServerResource")) + : txn_id(curr_txn_id), log(getLogger("ServerResource")) { } @@ -280,7 +281,7 @@ private: ResourceStageInfo resource_stage_info; bool skip_clean_worker{false}; - Poco::Logger * log; + LoggerPtr log; 
mutable ServerResourceLockManager lock_manager; bool send_mutations{false}; diff --git a/src/CloudServices/CnchServerServiceImpl.cpp b/src/CloudServices/CnchServerServiceImpl.cpp index 71fd9e8b36..2ad3fd0680 100644 --- a/src/CloudServices/CnchServerServiceImpl.cpp +++ b/src/CloudServices/CnchServerServiceImpl.cpp @@ -111,7 +111,7 @@ CnchServerServiceImpl::CnchServerServiceImpl(ContextMutablePtr global_context) : WithMutableContext(global_context), server_start_time(getTS(global_context)), global_gc_manager(global_context), - log(&Poco::Logger::get("CnchServerService")) + log(getLogger("CnchServerService")) { } @@ -177,7 +177,7 @@ void CnchServerServiceImpl::commitParts( auto column_commit_time = storage->getPartColumnsCommitTime(*(parts[0]->getColumnsPtr())); if (column_commit_time != storage->commit_time.toUInt64()) { - LOG_WARNING(&Poco::Logger::get("CnchServerService"), "Kafka consumer cannot commit parts because of underlying table change. Will reschedule consume task."); + LOG_WARNING(getLogger("CnchServerService"), "Kafka consumer cannot commit parts because of underlying table change. 
Will reschedule consume task."); throw Exception(ErrorCodes::CNCH_KAFKA_TASK_NEED_STOP, "Commit fails because of storage schema change"); } } @@ -187,7 +187,7 @@ void CnchServerServiceImpl::commitParts( for (const auto & tp : req->tpl()) tpl.emplace_back(cppkafka::TopicPartition(tp.topic(), tp.partition(), tp.offset())); - LOG_TRACE(&Poco::Logger::get("CnchServerService"), "parsed tpl to commit with size: {}\n", tpl.size()); + LOG_TRACE(getLogger("CnchServerService"), "parsed tpl to commit with size: {}\n", tpl.size()); } MySQLBinLogInfo binlog; diff --git a/src/CloudServices/CnchServerServiceImpl.h b/src/CloudServices/CnchServerServiceImpl.h index b8351501c4..98c96e3e16 100644 --- a/src/CloudServices/CnchServerServiceImpl.h +++ b/src/CloudServices/CnchServerServiceImpl.h @@ -19,6 +19,7 @@ # include "config_core.h" #endif +#include #include #include #include @@ -388,7 +389,7 @@ public: private: const UInt64 server_start_time; std::optional global_gc_manager; - Poco::Logger * log; + LoggerPtr log; }; REGISTER_SERVICE_IMPL(CnchServerServiceImpl); diff --git a/src/CloudServices/CnchWorkerClientPools.cpp b/src/CloudServices/CnchWorkerClientPools.cpp index 52a4fccc56..c5f20232a0 100644 --- a/src/CloudServices/CnchWorkerClientPools.cpp +++ b/src/CloudServices/CnchWorkerClientPools.cpp @@ -44,7 +44,7 @@ void CnchWorkerClientPools::addVirtualWarehouseImpl(const String & name, const S /// Will replace old pool pools[name] = std::move(pool); - LOG_INFO(&Poco::Logger::get("CnchWorkerClientPools"), "Added new vw: {} ", name); + LOG_INFO(getLogger("CnchWorkerClientPools"), "Added new vw: {} ", name); } void CnchWorkerClientPools::removeVirtualWarehouse(const String & name) diff --git a/src/CloudServices/CnchWorkerResource.cpp b/src/CloudServices/CnchWorkerResource.cpp index a5e8fcacc1..0b1b1be158 100644 --- a/src/CloudServices/CnchWorkerResource.cpp +++ b/src/CloudServices/CnchWorkerResource.cpp @@ -55,7 +55,7 @@ static ASTPtr parseCreateQuery(ContextMutablePtr context, const 
String & create_ void CnchWorkerResource::executeCreateQuery(ContextMutablePtr context, const String & create_query, bool skip_if_exists, const ColumnsDescription & object_columns) { - LOG_DEBUG(&Poco::Logger::get("WorkerResource"), "start create cloud table {}", create_query); + LOG_DEBUG(getLogger("WorkerResource"), "start create cloud table {}", create_query); auto ast_query = parseCreateQuery(context, create_query); auto & ast_create_query = ast_query->as(); @@ -85,7 +85,7 @@ void CnchWorkerResource::executeCacheableCreateQuery( const String & underlying_dictionary_tables, const ColumnsDescription & object_columns) { - static auto * log = &Poco::Logger::get("WorkerResource"); + static auto log = getLogger("WorkerResource"); std::shared_ptr cached; if (auto cache = context->tryGetCloudTableDefinitionCache(); cache && !context->hasSessionTimeZone()) @@ -220,7 +220,7 @@ void CnchWorkerResource::insertCloudTable(DatabaseAndTableName key, const Storag } } - static auto * log = &Poco::Logger::get("WorkerResource"); + static auto log = getLogger("WorkerResource"); LOG_DEBUG(log, "Successfully create database {} and table {} {}", tenant_db, storage->getName(), storage->getStorageID().getNameForLogs()); } diff --git a/src/CloudServices/CnchWorkerServiceImpl.cpp b/src/CloudServices/CnchWorkerServiceImpl.cpp index 70338d590c..305f863940 100644 --- a/src/CloudServices/CnchWorkerServiceImpl.cpp +++ b/src/CloudServices/CnchWorkerServiceImpl.cpp @@ -101,7 +101,7 @@ namespace ErrorCodes CnchWorkerServiceImpl::CnchWorkerServiceImpl(ContextMutablePtr context_) : WithMutableContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("CnchWorkerService")) + , log(getLogger("CnchWorkerService")) , thread_pool(getNumberOfPhysicalCPUCores() * 4, getNumberOfPhysicalCPUCores() * 2, getNumberOfPhysicalCPUCores() * 8) { } diff --git a/src/CloudServices/CnchWorkerServiceImpl.h b/src/CloudServices/CnchWorkerServiceImpl.h index e46de1c067..f10e4943c8 100644 --- 
a/src/CloudServices/CnchWorkerServiceImpl.h +++ b/src/CloudServices/CnchWorkerServiceImpl.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -228,7 +229,7 @@ public: google::protobuf::Closure * done) override; private: - Poco::Logger * log; + LoggerPtr log; // class PreloadHandler; // std::shared_ptr preload_handler; diff --git a/src/CloudServices/DedupDataChecker.cpp b/src/CloudServices/DedupDataChecker.cpp index 551579af85..be1ad0baa4 100644 --- a/src/CloudServices/DedupDataChecker.cpp +++ b/src/CloudServices/DedupDataChecker.cpp @@ -240,7 +240,7 @@ BlockIO DedupDataChecker::tryToExecuteQuery(const String & query_to_execute) DedupDataChecker::DedupDataChecker(ContextPtr context_, String logger_name_, const MergeTreeMetaBase & storage_) : WithContext(context_) , log_name(logger_name_) - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) , storage_id(storage_.getCnchStorageID()) { check_interval = storage_.getSettings()->check_duplicate_key_interval.totalMilliseconds(); diff --git a/src/CloudServices/DedupDataChecker.h b/src/CloudServices/DedupDataChecker.h index a508fa5bc7..7d4a9a26e3 100644 --- a/src/CloudServices/DedupDataChecker.h +++ b/src/CloudServices/DedupDataChecker.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -51,7 +52,7 @@ private: void processCheckerResult(StorageCnchMergeTree & cnch_table, Block & input_block); String log_name; - Poco::Logger * log; + LoggerPtr log; /// Data unique checker in background BackgroundSchedulePool::TaskHolder data_checker_task; std::atomic is_stopped{false}; diff --git a/src/CloudServices/ICnchBGThread.cpp b/src/CloudServices/ICnchBGThread.cpp index dfa089f73b..130494d244 100644 --- a/src/CloudServices/ICnchBGThread.cpp +++ b/src/CloudServices/ICnchBGThread.cpp @@ -29,7 +29,7 @@ ICnchBGThread::ICnchBGThread(ContextPtr global_context_, CnchBGThreadType thread , thread_type(thread_type_) , storage_id(storage_id_) , catalog(global_context_->getCnchCatalog()) - , 
log(&Poco::Logger::get(storage_id.getNameForLogs() + "(" + toString(thread_type) + ")")) + , log(getLogger(storage_id.getNameForLogs() + "(" + toString(thread_type) + ")")) , startup_time(time(nullptr)) { switch (thread_type) diff --git a/src/CloudServices/ICnchBGThread.h b/src/CloudServices/ICnchBGThread.h index 196b383f22..7522d3a7a2 100644 --- a/src/CloudServices/ICnchBGThread.h +++ b/src/CloudServices/ICnchBGThread.h @@ -16,6 +16,7 @@ #pragma once #include +#include #include #include #include @@ -112,7 +113,7 @@ protected: const CnchBGThreadType thread_type; const StorageID storage_id; std::shared_ptr catalog; - Poco::Logger * log; + LoggerPtr log; BackgroundSchedulePool::TaskHolder scheduled_task; /// Set to true when the BackgroundThread quit because of another same task already started on other servers. Only for MergeMutateThread. diff --git a/src/CloudServices/RpcClientBase.cpp b/src/CloudServices/RpcClientBase.cpp index e0d204e9b8..28be96b8c2 100644 --- a/src/CloudServices/RpcClientBase.cpp +++ b/src/CloudServices/RpcClientBase.cpp @@ -58,7 +58,7 @@ RpcClientBase::RpcClientBase(const String & log_prefix, const String & host_port } RpcClientBase::RpcClientBase(const String & log_prefix, HostWithPorts host_ports_, brpc::ChannelOptions * options) - : log(&Poco::Logger::get(log_prefix + "[" + host_ports_.toDebugString() + "]")) + : log(getLogger(log_prefix + "[" + host_ports_.toDebugString() + "]")) , host_ports(std::move(host_ports_)) , channel(std::make_unique()) { diff --git a/src/CloudServices/RpcClientBase.h b/src/CloudServices/RpcClientBase.h index c5600cc44d..a6fe0b6b0c 100644 --- a/src/CloudServices/RpcClientBase.h +++ b/src/CloudServices/RpcClientBase.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -54,7 +55,7 @@ protected: void assertController(const brpc::Controller & cntl); void initChannel(brpc::Channel & channel_, const String & host_port_, brpc::ChannelOptions * options = nullptr); - Poco::Logger * log; + LoggerPtr log; 
HostWithPorts host_ports; std::unique_ptr channel; diff --git a/src/CloudServices/selectPartsToMerge.cpp b/src/CloudServices/selectPartsToMerge.cpp index a059b9f084..a723ee1eea 100644 --- a/src/CloudServices/selectPartsToMerge.cpp +++ b/src/CloudServices/selectPartsToMerge.cpp @@ -33,7 +33,7 @@ ServerSelectPartsDecision selectPartsToMerge( bool enable_batch_select, bool final, [[maybe_unused]] bool merge_with_ttl_allowed, - Poco::Logger * log) + LoggerPtr log) { const auto data_settings = data.getSettings(); auto metadata_snapshot = data.getInMemoryMetadataPtr(); diff --git a/src/CloudServices/selectPartsToMerge.h b/src/CloudServices/selectPartsToMerge.h index 73f4111d45..497b779fa8 100644 --- a/src/CloudServices/selectPartsToMerge.h +++ b/src/CloudServices/selectPartsToMerge.h @@ -16,6 +16,7 @@ #pragma once #include +#include #include namespace DB @@ -41,7 +42,7 @@ ServerSelectPartsDecision selectPartsToMerge( bool enable_batch_select, bool final, bool merge_with_ttl_allowed, - Poco::Logger * log); + LoggerPtr log); /** * Group data parts by bucket number diff --git a/src/Columns/ListIndex.cpp b/src/Columns/ListIndex.cpp index 4ccdeb2168..f9e7a29240 100644 --- a/src/Columns/ListIndex.cpp +++ b/src/Columns/ListIndex.cpp @@ -135,7 +135,7 @@ void BitmapIndexReader::init() irk_buffer = std::make_unique(index_irk_disk->getPath() + index_irk_path, DBMS_DEFAULT_BUFFER_SIZE); LOG_DEBUG( - &Poco::Logger::get("BitmapIndexReader"), "Get BitMapIndex read buffers from local cache for column " + column_name); + getLogger("BitmapIndexReader"), "Get BitMapIndex read buffers from local cache for column " + column_name); read_from_local_cache = true; return; } @@ -147,7 +147,7 @@ void BitmapIndexReader::init() } catch(...) 
{ - tryLogCurrentException(&Poco::Logger::get("BitmapIndexReader"), "Cache or Get BitMapIndex Failed"); + tryLogCurrentException(getLogger("BitmapIndexReader"), "Cache or Get BitMapIndex Failed"); } try @@ -179,7 +179,7 @@ void BitmapIndexReader::init() } catch(...) { - tryLogCurrentException(&Poco::Logger::get("BitmapIndexReader"), __PRETTY_FUNCTION__); + tryLogCurrentException(getLogger("BitmapIndexReader"), __PRETTY_FUNCTION__); compressed_idx = nullptr; irk_buffer = nullptr; } diff --git a/src/Columns/ListIndex.h b/src/Columns/ListIndex.h index a790484d39..e58bd41b87 100644 --- a/src/Columns/ListIndex.h +++ b/src/Columns/ListIndex.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -264,7 +265,7 @@ public: virtual String getPath() const { return path; } - Poco::Logger * log = &Poco::Logger::get("BitmapColumnListIndexes"); + LoggerPtr log = getLogger("BitmapColumnListIndexes"); virtual ~IBitmapColumnListIndexes() = default; }; diff --git a/src/Columns/SegmentListIndex.cpp b/src/Columns/SegmentListIndex.cpp index 0085ff1db0..f350e2c648 100644 --- a/src/Columns/SegmentListIndex.cpp +++ b/src/Columns/SegmentListIndex.cpp @@ -108,7 +108,7 @@ void SegmentBitmapIndexReader::init() } catch(...) 
{ - tryLogCurrentException(&Poco::Logger::get("SegmentBitmapIndexReader"), __PRETTY_FUNCTION__); + tryLogCurrentException(getLogger("SegmentBitmapIndexReader"), __PRETTY_FUNCTION__); compressed_idx = nullptr; seg_tab = nullptr; seg_dir = nullptr; diff --git a/src/Columns/SegmentListIndex.h b/src/Columns/SegmentListIndex.h index c95ac7e02a..ab6a9fa2ff 100644 --- a/src/Columns/SegmentListIndex.h +++ b/src/Columns/SegmentListIndex.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -186,7 +187,7 @@ class ISegmentBitmapColumnListIndexes virtual String getPath() const { return path; } - Poco::Logger * log = &Poco::Logger::get("SegmentBitmapColumnListIndexes"); + LoggerPtr log = getLogger("SegmentBitmapColumnListIndexes"); virtual ~ISegmentBitmapColumnListIndexes() = default; }; diff --git a/src/Common/AdditionalServices.cpp b/src/Common/AdditionalServices.cpp index 813c0fe421..87468e80e6 100644 --- a/src/Common/AdditionalServices.cpp +++ b/src/Common/AdditionalServices.cpp @@ -33,7 +33,7 @@ AdditionalService::AdditionalService(std::string svc_s) noexcept } } - LOG_WARNING(&Poco::Logger::get("AdditionalService"), "cannot find additional service {}", svc_s); + LOG_WARNING(getLogger("AdditionalService"), "cannot find additional service {}", svc_s); additional_service = Value::SIZE; } void AdditionalServices::parseAdditionalServicesFromConfig(const Poco::Util::AbstractConfiguration & config) diff --git a/src/Common/Brpc/BrpcApplication.cpp b/src/Common/Brpc/BrpcApplication.cpp index 9c710fe823..d4f4a1400a 100644 --- a/src/Common/Brpc/BrpcApplication.cpp +++ b/src/Common/Brpc/BrpcApplication.cpp @@ -25,7 +25,7 @@ namespace DB { BrpcApplication::BrpcApplication() { - logger = &Poco::Logger::get("BrpcApplication"); + logger = getLogger("BrpcApplication"); //Init Brpc log initBrpcLog(); diff --git a/src/Common/Brpc/BrpcApplication.h b/src/Common/Brpc/BrpcApplication.h index b098473a63..210c401db2 100644 --- a/src/Common/Brpc/BrpcApplication.h +++ 
b/src/Common/Brpc/BrpcApplication.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -49,7 +50,7 @@ public: private: ::logging::LogSink * old_sink; - Poco::Logger * logger; + LoggerPtr logger; ConfigHolderMap config_holder_map; mutable std::mutex holder_map_mutex; diff --git a/src/Common/Brpc/BrpcGflagsConfigHolder.h b/src/Common/Brpc/BrpcGflagsConfigHolder.h index af4a7c045d..111a3979e0 100644 --- a/src/Common/Brpc/BrpcGflagsConfigHolder.h +++ b/src/Common/Brpc/BrpcGflagsConfigHolder.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -31,13 +32,13 @@ class BrpcGflagsConfigHolder : public SealedConfigHolder #include #include #include @@ -26,7 +27,7 @@ namespace DB class BrpcPocoLogSink : public ::logging::LogSink, private boost::noncopyable { public: - explicit BrpcPocoLogSink() { logger = &Poco::Logger::get("brpc"); } + explicit BrpcPocoLogSink() { logger = getLogger("brpc"); } ~BrpcPocoLogSink() override = default; @@ -34,7 +35,7 @@ public: private: - Poco::Logger * logger; + LoggerPtr logger; }; static inline Poco::Message::Priority brpc2PocoLogPriority(int brpcLogPriority) diff --git a/src/Common/BucketLRUCache.h b/src/Common/BucketLRUCache.h index 3bffbb76f7..f0bd5fe3d5 100644 --- a/src/Common/BucketLRUCache.h +++ b/src/Common/BucketLRUCache.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -202,7 +203,7 @@ public: }; explicit BucketLRUCache(const Options& opts_): - opts(opts_), logger(&Poco::Logger::get("BucketLRUCache")), + opts(opts_), logger(getLogger("BucketLRUCache")), weighter(), current_weight(), hits(0), misses(0), container(opts_.mapping_bucket_size) { @@ -518,7 +519,7 @@ private: auto iter = bucket.cells.find(key); if (iter == bucket.cells.end()) { - LOG_ERROR(&Poco::Logger::get("BucketLRUCache"), "BucketLRUCache become inconsistent, There must be a bug on it"); + LOG_ERROR(getLogger("BucketLRUCache"), "BucketLRUCache become inconsistent, There must be a bug on it"); abort(); } @@ 
-801,7 +802,7 @@ private: } const Options opts; - Poco::Logger* logger; + LoggerPtr logger; TWeighter weighter; // First return value indiecate if the entry should dropped, diff --git a/src/Common/CGroup/CGroupManagerFactory.cpp b/src/Common/CGroup/CGroupManagerFactory.cpp index 90580fa7f5..1ca751a7b1 100644 --- a/src/Common/CGroup/CGroupManagerFactory.cpp +++ b/src/Common/CGroup/CGroupManagerFactory.cpp @@ -34,7 +34,7 @@ void CGroupManagerFactory::loadFromConfig(const Poco::Util::AbstractConfiguratio if (!config.has("enable_cgroup") || !config.getBool("enable_cgroup")) return; - LOG_INFO(&Poco::Logger::get("CGroupManager"), "Init CGroupManager"); + LOG_INFO(getLogger("CGroupManager"), "Init CGroupManager"); if (config.has("cgroup_root_path") && !config.getString("cgroup_root_path").empty()) { diff --git a/src/Common/ChineseTokenExtractor.cpp b/src/Common/ChineseTokenExtractor.cpp index 46cf893574..849d209835 100644 --- a/src/Common/ChineseTokenExtractor.cpp +++ b/src/Common/ChineseTokenExtractor.cpp @@ -61,7 +61,7 @@ void ChineseTokenizerFactory::registeChineseTokneizer(const Poco::Util::Abstract tokenizers[tokenizer_name] = std::make_shared( dict_path, hmm_model_path, user_dict_path, idf_path, stop_words_path); - LOG_TRACE(&Poco::Logger::get(__func__), "registe chinese tokenizer config name: {} ", tokenizer_name); + LOG_TRACE(getLogger(__func__), "registe chinese tokenizer config name: {} ", tokenizer_name); } } diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp index 9be76d28c0..c759591897 100644 --- a/src/Common/Config/ConfigProcessor.cpp +++ b/src/Common/Config/ConfigProcessor.cpp @@ -89,7 +89,7 @@ ConfigProcessor::ConfigProcessor( } else { - log = &Poco::Logger::get("ConfigProcessor"); + log = getRawLogger("ConfigProcessor"); } } diff --git a/src/Common/Config/ConfigProcessor.h b/src/Common/Config/ConfigProcessor.h index 6e903255e5..413c6f7c37 100644 --- a/src/Common/Config/ConfigProcessor.h +++ 
b/src/Common/Config/ConfigProcessor.h @@ -4,6 +4,7 @@ #include #endif +#include #include #include #include @@ -120,7 +121,8 @@ private: bool throw_on_bad_incl; - Poco::Logger * log; + /// stick to raw logger in order to support Poco::Logger::has(name) + LoggerRawPtr log; Poco::AutoPtr channel_ptr; Substitutions substitutions; diff --git a/src/Common/Config/ConfigReloader.h b/src/Common/Config/ConfigReloader.h index 2e4399d3c4..80f2f19116 100644 --- a/src/Common/Config/ConfigReloader.h +++ b/src/Common/Config/ConfigReloader.h @@ -1,5 +1,6 @@ #pragma once +#include #include "ConfigProcessor.h" #include #include @@ -69,7 +70,7 @@ private: static constexpr auto reload_interval = std::chrono::seconds(2); - Poco::Logger * log = &Poco::Logger::get("ConfigReloader"); + LoggerPtr log = getLogger("ConfigReloader"); std::string path; std::string include_from_path; diff --git a/src/Common/ConfigurationCommon.cpp b/src/Common/ConfigurationCommon.cpp index 193b0d6e13..d6cf06d49e 100644 --- a/src/Common/ConfigurationCommon.cpp +++ b/src/Common/ConfigurationCommon.cpp @@ -36,7 +36,7 @@ bool ConfigurationFieldBase::checkField(const PocoAbstractConfig & config, const if (deprecated()) { LOG_WARNING( - &Poco::Logger::get("Configuration"), "Config element {} is deprecated. Please remove corresponding tags!", full_key); + getLogger("Configuration"), "Config element {} is deprecated. Please remove corresponding tags!", full_key); } } else @@ -44,7 +44,7 @@ bool ConfigurationFieldBase::checkField(const PocoAbstractConfig & config, const if (recommended()) { LOG_DEBUG( - &Poco::Logger::get("Configuration"), + getLogger("Configuration"), "Config element {} is recommended to set in config.xml. 
You'd better customize it.", full_key); } else if (required()) diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 8b006bc550..c3ed9d60d6 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -156,7 +156,7 @@ struct DNSResolver::Impl }; -DNSResolver::DNSResolver() : impl(std::make_unique()), log(&Poco::Logger::get("DNSResolver")) {} +DNSResolver::DNSResolver() : impl(std::make_unique()), log(getLogger("DNSResolver")) {} Poco::Net::IPAddress DNSResolver::resolveHost(const std::string & host) { diff --git a/src/Common/DNSResolver.h b/src/Common/DNSResolver.h index 57c28188f5..4901f8d0e1 100644 --- a/src/Common/DNSResolver.h +++ b/src/Common/DNSResolver.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -60,7 +61,7 @@ private: struct Impl; std::unique_ptr impl; - Poco::Logger * log; + LoggerPtr log; /// Updates cached value and returns true it has been changed. bool updateHost(const String & host); diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 2881f6736c..e8aa7c294b 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -190,7 +190,8 @@ void throwFromErrnoWithPath(const std::string & s, const std::string & path, int throw ErrnoException(s + ", " + errnoToString(code, the_errno) + ", path = " + path, code, the_errno, path); } -static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message) +template +static void tryLogCurrentExceptionImpl(T logger, const std::string & start_of_message) { try { @@ -213,8 +214,7 @@ void tryLogCurrentException(const char * log_name, const std::string & start_of_ /// MemoryTracker until the exception will be logged. 
MemoryTracker::LockExceptionInThread lock_memory_tracker(VariableContext::Global); - /// Poco::Logger::get can allocate memory too - tryLogCurrentExceptionImpl(&Poco::Logger::get(log_name), start_of_message); + tryLogCurrentExceptionImpl(getLogger(log_name), start_of_message); } void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message) @@ -229,9 +229,21 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_ tryLogCurrentExceptionImpl(logger, start_of_message); } +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message) +{ + /// Under high memory pressure, any new allocation will definitely lead + /// to MEMORY_LIMIT_EXCEEDED exception. + /// + /// And in this case the exception will not be logged, so let's block the + /// MemoryTracker until the exception will be logged. + MemoryTracker::LockExceptionInThread lock_memory_tracker(VariableContext::Global); + + tryLogCurrentExceptionImpl(logger, start_of_message); +} + void tryLogDebugCurrentException(const char * log_name, const std::string & start_of_message) { - tryLogDebugCurrentException(&Poco::Logger::get(log_name), start_of_message); + tryLogDebugCurrentException(getLogger(log_name), start_of_message); } void tryLogDebugCurrentException(Poco::Logger * logger, const std::string & start_of_message) @@ -245,9 +257,20 @@ void tryLogDebugCurrentException(Poco::Logger * logger, const std::string & star } } +void tryLogDebugCurrentException(LoggerPtr logger, const std::string & start_of_message) +{ + try + { + LOG_DEBUG(logger, start_of_message + (start_of_message.empty() ? "" : ": ") + getCurrentExceptionMessage(true)); + } + catch (...) 
+ { + } +} + void tryLogWarningCurrentException(const char * log_name, const std::string & start_of_message) { - tryLogWarningCurrentException(&Poco::Logger::get(log_name), start_of_message); + tryLogWarningCurrentException(getLogger(log_name), start_of_message); } void tryLogWarningCurrentException(Poco::Logger * logger, const std::string & start_of_message) @@ -261,6 +284,17 @@ void tryLogWarningCurrentException(Poco::Logger * logger, const std::string & st } } +void tryLogWarningCurrentException(LoggerPtr logger, const std::string & start_of_message) +{ + try + { + LOG_WARNING(logger, start_of_message + (start_of_message.empty() ? "" : ": ") + getCurrentExceptionMessage(true)); + } + catch (...) + { + } +} + std::unique_ptr getSerializableException() { try @@ -519,6 +553,18 @@ void tryLogException(std::exception_ptr e, Poco::Logger * logger, const std::str } } +void tryLogException(std::exception_ptr e, LoggerPtr logger, const std::string & start_of_message) +{ + try + { + std::rethrow_exception(std::move(e)); // NOLINT + } + catch (...) 
+ { + tryLogCurrentException(logger, start_of_message); + } +} + std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace) { WriteBufferFromOwnString stream; diff --git a/src/Common/Exception.h b/src/Common/Exception.h index aa6e2c251e..cda813ceb9 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -32,6 +32,7 @@ #include #include +#include #include @@ -184,12 +185,15 @@ using Exceptions = std::vector; */ void tryLogCurrentException(const char * log_name, const std::string & start_of_message = ""); void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message = ""); +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message = ""); void tryLogDebugCurrentException(const char * log_name, const std::string & start_of_message = ""); void tryLogDebugCurrentException(Poco::Logger * logger, const std::string & start_of_message = ""); +void tryLogDebugCurrentException(LoggerPtr logger, const std::string & start_of_message = ""); void tryLogWarningCurrentException(const char * log_name, const std::string & start_of_message = ""); void tryLogWarningCurrentException(Poco::Logger * logger, const std::string & start_of_message = ""); +void tryLogWarningCurrentException(LoggerPtr logger, const std::string & start_of_message = ""); /** Prints current exception in canonical format. * with_stacktrace - prints stack trace for DB::Exception. 
@@ -229,6 +233,7 @@ struct ExecutionStatus void tryLogException(std::exception_ptr e, const char * log_name, const std::string & start_of_message = ""); void tryLogException(std::exception_ptr e, Poco::Logger * logger, const std::string & start_of_message = ""); +void tryLogException(std::exception_ptr e, LoggerPtr logger, const std::string & start_of_message = ""); std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace = false); std::string getExceptionMessage(std::exception_ptr e, bool with_stacktrace); diff --git a/src/Common/FileChecker.cpp b/src/Common/FileChecker.cpp index 173c4bd8a3..eccfda1c17 100644 --- a/src/Common/FileChecker.cpp +++ b/src/Common/FileChecker.cpp @@ -97,7 +97,7 @@ void FileChecker::repair() if (real_size > expected_size) { - LOG_WARNING(&Poco::Logger::get("FileChecker"), "Will truncate file {} that has size {} to size {}", path, real_size, expected_size); + LOG_WARNING(getLogger("FileChecker"), "Will truncate file {} that has size {} to size {}", path, real_size, expected_size); disk->truncateFile(path, expected_size); } } diff --git a/src/Common/FileChecker.h b/src/Common/FileChecker.h index 73e4470f23..f187416c17 100644 --- a/src/Common/FileChecker.h +++ b/src/Common/FileChecker.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -41,7 +42,7 @@ private: Map map; - Poco::Logger * log = &Poco::Logger::get("FileChecker"); + LoggerPtr log = getLogger("FileChecker"); }; } diff --git a/src/Common/FrequencyHolder.h b/src/Common/FrequencyHolder.h index 77d8b98ef5..03b5ec5fb4 100644 --- a/src/Common/FrequencyHolder.h +++ b/src/Common/FrequencyHolder.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -88,7 +89,7 @@ private: void loadEncodingsFrequency() { - Poco::Logger * log = &Poco::Logger::get("EncodingsFrequency"); + LoggerPtr log = getLogger("EncodingsFrequency"); LOG_TRACE(log, "Loading embedded charset frequencies"); @@ -146,7 +147,7 @@ private: void 
loadEmotionalDict() { - Poco::Logger * log = &Poco::Logger::get("EmotionalDict"); + LoggerPtr log = getLogger("EmotionalDict"); LOG_TRACE(log, "Loading embedded emotional dictionary"); auto resource = getResource("tonality_ru.zst"); @@ -184,7 +185,7 @@ private: void loadProgrammingFrequency() { - Poco::Logger * log = &Poco::Logger::get("ProgrammingFrequency"); + LoggerPtr log = getLogger("ProgrammingFrequency"); LOG_TRACE(log, "Loading embedded programming languages frequencies loading"); diff --git a/src/Common/HistogramMetrics.h b/src/Common/HistogramMetrics.h index 17e3ddad2f..3d19e5df89 100644 --- a/src/Common/HistogramMetrics.h +++ b/src/Common/HistogramMetrics.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -86,7 +87,7 @@ namespace HistogramMetrics { if (type != Metrics::MetricType::Timer) { - LOG_ERROR(&Poco::Logger::get("HistogramMetrics"), "Only support Metrics::MetricType::Timer type when report histogram metrics"); + LOG_ERROR(getLogger("HistogramMetrics"), "Only support Metrics::MetricType::Timer type when report histogram metrics"); return; } @@ -96,7 +97,7 @@ namespace HistogramMetrics } catch (DB::Exception & e) { - LOG_ERROR(&Poco::Logger::get("HistogramMetrics"), "Metrics emit metric failed: {}", e.message()); + LOG_ERROR(getLogger("HistogramMetrics"), "Metrics emit metric failed: {}", e.message()); } } } diff --git a/src/Common/JeprofControl.h b/src/Common/JeprofControl.h index e28c134225..e040edb1c3 100644 --- a/src/Common/JeprofControl.h +++ b/src/Common/JeprofControl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if __has_include() @@ -46,7 +47,7 @@ private: String prof_path{"/tmp/"}; mutable std::atomic index{0}; - Poco::Logger * log{&Poco::Logger::get("JeprofControl")}; + LoggerPtr log{getLogger("JeprofControl")}; #endif }; diff --git a/src/Common/LRUCache.h b/src/Common/LRUCache.h index 4bfa872863..b1b0386c08 100644 --- a/src/Common/LRUCache.h +++ b/src/Common/LRUCache.h @@ -21,6 +21,7 @@ #pragma once +#include 
 #include #include #include @@ -525,7 +526,7 @@ private: auto it = cells.find(key); if (it == cells.end()) { - LOG_ERROR(&Poco::Logger::get("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); + LOG_ERROR(getLogger("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); abort(); } @@ -554,7 +555,7 @@ private: if (current_size > (1ull << 63)) { - LOG_ERROR(&Poco::Logger::get("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); + LOG_ERROR(getLogger("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); abort(); } } diff --git a/src/Common/Logger.cpp b/src/Common/Logger.cpp new file mode 100644 index 0000000000..08cdb68106 --- /dev/null +++ b/src/Common/Logger.cpp @@ -0,0 +1,17 @@ +#include <Common/Logger.h> + +LoggerPtr getLogger(const std::string & name) +{ + static Poco::Logger * root = &Poco::Logger::root(); + return std::make_shared<DB::VirtualLogger>(name, root); +} + +LoggerPtr getLogger(Poco::Logger & raw_logger) +{ + return std::make_shared<DB::VirtualLogger>(raw_logger.name(), &raw_logger); +} + +LoggerRawPtr getRawLogger(const std::string & name) +{ + return &Poco::Logger::get(name); +} diff --git a/src/Common/Logger.h b/src/Common/Logger.h new file mode 100644 index 0000000000..25935e31a7 --- /dev/null +++ b/src/Common/Logger.h @@ -0,0 +1,61 @@ +#pragma once + +#include <memory> +#include <Poco/Logger.h> + +namespace DB +{ +/// Lightweight logger that acts mostly like Poco::Logger but is cheaper to create & destroy. +class VirtualLogger +{ +public: + explicit VirtualLogger(const std::string & name_, ::Poco::Logger * wrapped_) : fake_name(name_), wrapped(wrapped_) { } + + /// implement functions used by LOG_IMPL(...) 
so that it can be used in LOG_XXX macros + const std::string & name() const { return fake_name; } + bool is(int level) const { return wrapped->is(level); } + ::Poco::Channel * getChannel() const { return wrapped->getChannel(); } + + bool fatal() const { return wrapped->fatal(); } + bool critical() const { return wrapped->critical(); } + bool error() const { return wrapped->error(); } + bool warning() const { return wrapped->warning(); } + bool notice() const { return wrapped->notice(); } + bool information() const { return wrapped->information(); } + bool debug() const { return wrapped->debug(); } + bool trace() const { return wrapped->trace(); } + + int getLevel() const { return wrapped->getLevel(); } + void log(const ::Poco::Message & msg) { wrapped->log(msg); } + +private: + std::string fake_name; + ::Poco::Logger * wrapped; +}; + +} // namespace DB + +using LoggerPtr = std::shared_ptr<DB::VirtualLogger>; +using LoggerRawPtr = Poco::Logger *; + +/// Factory method to obtain a lightweight logger. +/// +/// Advantages over `&Poco::Logger::get(name)` or `getRawLogger` +/// 1. no lock contention during logger creation / destruction +/// 2. no risk of memory leak (logger won't be added to global map structure) +LoggerPtr getLogger(const std::string & name); + +/// Adaptor interface that converts an existing Poco::Logger into LoggerPtr. +/// It's the caller's responsibility to make sure `raw_logger` lives longer. +/// Use with caution. +LoggerPtr getLogger(Poco::Logger & raw_logger); + +/** Create a raw Poco::Logger that will not be destroyed before program termination. + * This can be used in cases when a specific Logger instance can be a singleton. + * + * For example you need to pass Logger into low-level libraries as raw pointer, and using + * RAII wrapper is inconvenient. + * + * Generally you should always use getLogger functions. 
+ */ +LoggerRawPtr getRawLogger(const std::string & name); diff --git a/src/Common/Macros.cpp b/src/Common/Macros.cpp index 7882449b59..282f545efe 100644 --- a/src/Common/Macros.cpp +++ b/src/Common/Macros.cpp @@ -13,7 +13,7 @@ namespace ErrorCodes extern const int SYNTAX_ERROR; } -Macros::Macros(const Poco::Util::AbstractConfiguration & config, const String & root_key, Poco::Logger * log) +Macros::Macros(const Poco::Util::AbstractConfiguration & config, const String & root_key, LoggerPtr log) { Poco::Util::AbstractConfiguration::Keys keys; config.keys(root_key, keys); diff --git a/src/Common/Macros.h b/src/Common/Macros.h index 9298dbfc2d..a90b4875ae 100644 --- a/src/Common/Macros.h +++ b/src/Common/Macros.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -26,7 +27,7 @@ class Macros { public: Macros() = default; - Macros(const Poco::Util::AbstractConfiguration & config, const String & key, Poco::Logger * log = nullptr); + Macros(const Poco::Util::AbstractConfiguration & config, const String & key, LoggerPtr log = nullptr); struct MacroExpansionInfo { diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp index 9749b259bd..1134d38695 100644 --- a/src/Common/MemoryTracker.cpp +++ b/src/Common/MemoryTracker.cpp @@ -117,9 +117,9 @@ MemoryTracker total_memory_tracker(nullptr, VariableContext::Global); MemoryTracker::MemoryTracker(VariableContext level_) - : parent(&total_memory_tracker), log(&Poco::Logger::get("MemoryTracker")), level(level_) {} + : parent(&total_memory_tracker), log(getLogger("MemoryTracker")), level(level_) {} MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_) - : parent(parent_), log(&Poco::Logger::get("MemoryTracker")), level(level_) {} + : parent(parent_), log(getLogger("MemoryTracker")), level(level_) {} MemoryTracker::~MemoryTracker() diff --git a/src/Common/MemoryTracker.h b/src/Common/MemoryTracker.h index 70dbf5a2a9..4f786527e5 100644 --- a/src/Common/MemoryTracker.h +++ 
b/src/Common/MemoryTracker.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -79,7 +80,7 @@ private: /// In terms of tree nodes it is the list of parents. Lifetime of these trackers should "include" lifetime of current tracker. std::atomic parent {}; - Poco::Logger * log; + LoggerPtr log; /// You could specify custom metric to track memory usage. std::atomic metric = CurrentMetrics::end(); diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index 9773a4cf2c..188391bdab 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -463,7 +463,7 @@ catch (...) is_trivial = false; required_substring_is_prefix = false; alternatives.clear(); - LOG_ERROR(&Poco::Logger::get("OptimizeRegularExpression"), "Analyze RegularExpression failed, got error: {}", DB::getCurrentExceptionMessage(false)); + LOG_ERROR(getLogger("OptimizeRegularExpression"), "Analyze RegularExpression failed, got error: {}", DB::getCurrentExceptionMessage(false)); } OptimizedRegularExpression::OptimizedRegularExpression(const std::string & regexp_, int options) diff --git a/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp index adbaec427a..a984a7af31 100644 --- a/src/Common/PipeFDs.cpp +++ b/src/Common/PipeFDs.cpp @@ -120,7 +120,7 @@ void LazyPipeFDs::setNonBlockingReadWrite() void LazyPipeFDs::tryIncreaseSize(int desired_size) { #if defined(OS_LINUX) - Poco::Logger * log = &Poco::Logger::get("Pipe"); + LoggerPtr log = getLogger("Pipe"); /** Increase pipe size to avoid slowdown during fine-grained trace collection. 
*/ diff --git a/src/Common/PoolBase.h b/src/Common/PoolBase.h index d36ba7e2db..4132f9a0b2 100644 --- a/src/Common/PoolBase.h +++ b/src/Common/PoolBase.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -223,9 +224,9 @@ private: std::condition_variable available; protected: - Poco::Logger * log; + LoggerPtr log; - PoolBase(unsigned max_items_, Poco::Logger * log_, BehaviourOnLimit behaviour_on_limit_ = BehaviourOnLimit::Wait) + PoolBase(unsigned max_items_, LoggerPtr log_, BehaviourOnLimit behaviour_on_limit_ = BehaviourOnLimit::Wait) : max_items(max_items_), behaviour_on_limit(behaviour_on_limit_), log(log_) { items.reserve(max_items); diff --git a/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h index e84e249d17..a1b75ccc14 100644 --- a/src/Common/PoolWithFailoverBase.h +++ b/src/Common/PoolWithFailoverBase.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -56,7 +57,7 @@ public: NestedPools nested_pools_, time_t decrease_error_period_, size_t max_error_cap_, - Poco::Logger * log_) + LoggerPtr log_) : nested_pools(std::move(nested_pools_)) , decrease_error_period(decrease_error_period_) , max_error_cap(max_error_cap_) @@ -156,7 +157,7 @@ protected: /// The time when error counts were last decreased. 
time_t last_error_decrease_time = 0; - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 17511329d5..30f4c82514 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -1317,7 +1317,7 @@ void increment(Event event, Count amount, Metrics::MetricType type, LabelledMetr } catch (DB::Exception & e) { - LOG_ERROR(&Poco::Logger::get("ProfileEvents"), "Metrics emit metric failed: {}", e.message()); + LOG_ERROR(getLogger("ProfileEvents"), "Metrics emit metric failed: {}", e.message()); } } diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index bd1cab42be..bdb6131761 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -79,7 +79,7 @@ namespace ErrorCodes template QueryProfilerBase::QueryProfilerBase(const UInt64 thread_id, const int clock_type, UInt32 period, const int pause_signal_) - : log(&Poco::Logger::get("QueryProfiler")) + : log(getLogger("QueryProfiler")) , pause_signal(pause_signal_) { #if USE_UNWIND diff --git a/src/Common/QueryProfiler.h b/src/Common/QueryProfiler.h index 8e2d09e0be..41a43e648c 100644 --- a/src/Common/QueryProfiler.h +++ b/src/Common/QueryProfiler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -38,7 +39,7 @@ public: private: void tryCleanup(); - Poco::Logger * log; + LoggerPtr log; #if USE_UNWIND /// Timer id from timer_create(2) diff --git a/src/Common/ResourceMonitor.cpp b/src/Common/ResourceMonitor.cpp index 30c9a43aa3..44e552e3cf 100644 --- a/src/Common/ResourceMonitor.cpp +++ b/src/Common/ResourceMonitor.cpp @@ -127,7 +127,7 @@ CPUMonitor::CPUMonitor() if (-1 == fd) throwFromErrno("Cannot open file " + std::string(filename), errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); in_container = inContainer(); - LOG_DEBUG(&Poco::Logger::get(__PRETTY_FUNCTION__), "The env is in container : {}", in_container); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "The env is in container : {}", in_container); } CPUMonitor::~CPUMonitor() @@ -247,7 +247,7 @@ MemoryMonitor::MemoryMonitor() if (-1 == fd) throwFromErrno("Cannot open file " + std::string(filename), errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); in_container = inContainer(); - LOG_DEBUG(&Poco::Logger::get(__PRETTY_FUNCTION__), "The env is in container : {}", in_container); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "The env is in container : {}", in_container); } diff --git a/src/Common/RpcClientPool.h b/src/Common/RpcClientPool.h index 8d28aa1203..a6d7717be6 100644 --- a/src/Common/RpcClientPool.h +++ b/src/Common/RpcClientPool.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -58,7 +59,7 @@ public: : service_name(std::move(service_name_)) , lookup(std::move(lookup_)) , creator(std::move(creator_)) - , log(&Poco::Logger::get(getName() + ':' + getServiceName())) + , log(getLogger(getName() + ':' + getServiceName())) { } @@ -242,7 +243,7 @@ private: const String service_name; std::function lookup; std::function creator; - Poco::Logger * log; + LoggerPtr log; mutable std::mutex state_mutex; diff --git a/src/Common/SensitiveDataMasker.cpp b/src/Common/SensitiveDataMasker.cpp index 5bf40c420e..0e5a417366 100644 --- a/src/Common/SensitiveDataMasker.cpp +++ b/src/Common/SensitiveDataMasker.cpp @@ -106,7 +106,7 @@ SensitiveDataMasker::SensitiveDataMasker(const Poco::Util::AbstractConfiguration { Poco::Util::AbstractConfiguration::Keys keys; config.keys(config_prefix, keys); - Poco::Logger * logger = &Poco::Logger::get("SensitiveDataMaskerConfigRead"); + LoggerPtr logger = getLogger("SensitiveDataMaskerConfigRead"); std::set used_names; diff --git a/src/Common/ShellCommand.cpp 
b/src/Common/ShellCommand.cpp index 6a18cb7734..046ae6dd6a 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -48,9 +48,9 @@ ShellCommand::ShellCommand(pid_t pid_, int & in_fd_, int & out_fd_, int & err_fd { } -Poco::Logger * ShellCommand::getLogger() +LoggerPtr ShellCommand::getLogger() { - return &Poco::Logger::get("ShellCommand"); + return ::getLogger("ShellCommand"); } ShellCommand::~ShellCommand() diff --git a/src/Common/ShellCommand.h b/src/Common/ShellCommand.h index f1d808128f..8e852f9412 100644 --- a/src/Common/ShellCommand.h +++ b/src/Common/ShellCommand.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -49,7 +50,7 @@ private: bool tryWaitProcessWithTimeout(size_t timeout_in_seconds); - static Poco::Logger * getLogger(); + static LoggerPtr getLogger(); /// Print command name and the list of arguments to log. NOTE: No escaping of arguments is performed. static void logCommand(const char * filename, char * const argv[]); diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index 36b1732074..5e8353ee0d 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -56,9 +56,9 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) } if (!contents.empty()) - LOG_INFO(&Poco::Logger::get("StatusFile"), "Status file {} already exists - unclean restart. Contents:\n{}", path, contents); + LOG_INFO(getLogger("StatusFile"), "Status file {} already exists - unclean restart. 
Contents:\n{}", path, contents); else - LOG_INFO(&Poco::Logger::get("StatusFile"), "Status file {} already exists and is empty - probably unclean hardware restart.", path); + LOG_INFO(getLogger("StatusFile"), "Status file {} already exists and is empty - probably unclean hardware restart.", path); } fd = ::open(path.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0666); @@ -109,10 +109,10 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) StatusFile::~StatusFile() { if (0 != close(fd)) - LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot close file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); + LOG_ERROR(getLogger("StatusFile"), "Cannot close file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); if (0 != unlink(path.c_str())) - LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); + LOG_ERROR(getLogger("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); } } diff --git a/src/Common/StorageElection/ElectionReader.cpp b/src/Common/StorageElection/ElectionReader.cpp index 3ba1199764..775ced2694 100644 --- a/src/Common/StorageElection/ElectionReader.cpp +++ b/src/Common/StorageElection/ElectionReader.cpp @@ -29,7 +29,7 @@ namespace ErrorCodes } ElectionReader::ElectionReader(IKvStoragePtr store_, const String & election_key_) - : store(std::move(store_)), election_key(election_key_), logger(&Poco::Logger::get("ElectionReader")) + : store(std::move(store_)), election_key(election_key_), logger(getLogger("ElectionReader")) { LOG_INFO(logger, "election_key: {}", election_key); } diff --git a/src/Common/StorageElection/ElectionReader.h b/src/Common/StorageElection/ElectionReader.h index c42282e822..146ce14227 100644 --- a/src/Common/StorageElection/ElectionReader.h +++ b/src/Common/StorageElection/ElectionReader.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -40,7 +41,7 @@ private: mutable 
std::mutex leader_info_mutex; IKvStoragePtr store; String election_key; - Poco::Logger * logger = nullptr; + LoggerPtr logger = nullptr; }; } diff --git a/src/Common/StorageElection/StorageElector.cpp b/src/Common/StorageElection/StorageElector.cpp index 71c1198af9..a15b6c9ac6 100644 --- a/src/Common/StorageElection/StorageElector.cpp +++ b/src/Common/StorageElection/StorageElector.cpp @@ -39,7 +39,7 @@ StorageElector::StorageElector( , election_key(election_key_) , on_leader(std::move(on_leader_)) , on_follower(std::move(on_follower_)) - , logger(&Poco::Logger::get("StorageElector")) + , logger(getLogger("StorageElector")) { local_info.refresh_interval_ms = refresh_interval_ms; local_info.expired_interval_ms = expired_interval_ms; diff --git a/src/Common/StorageElection/StorageElector.h b/src/Common/StorageElection/StorageElector.h index dc0199a097..ec9258e6ce 100644 --- a/src/Common/StorageElection/StorageElector.h +++ b/src/Common/StorageElection/StorageElector.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -98,7 +99,7 @@ private: std::chrono::milliseconds sleep_time{std::chrono::milliseconds::zero()}; uint64_t last_refresh_local_time{0}; - Poco::Logger * logger = nullptr; + LoggerPtr logger = nullptr; void start(); diff --git a/src/Common/StorageElection/examples/memstore_election_demo.cpp b/src/Common/StorageElection/examples/memstore_election_demo.cpp index 30c3f83d78..92a8222631 100644 --- a/src/Common/StorageElection/examples/memstore_election_demo.cpp +++ b/src/Common/StorageElection/examples/memstore_election_demo.cpp @@ -21,9 +21,9 @@ const static int test_nodes = 3; -static Poco::Logger * getLogger() +static LoggerPtr getLogger() { - return &Poco::Logger::get("memstore_elector"); + return ::getLogger("memstore_elector"); } class MemStorage : public DB::IKvStorage diff --git a/src/Common/TLDListsHolder.cpp b/src/Common/TLDListsHolder.cpp index 1283b49c89..5c1f44771b 100644 --- a/src/Common/TLDListsHolder.cpp +++ 
b/src/Common/TLDListsHolder.cpp @@ -54,7 +54,7 @@ void TLDListsHolder::parseConfig(const std::string & top_level_domains_path, con Poco::Util::AbstractConfiguration::Keys config_keys; config.keys("top_level_domains_lists", config_keys); - Poco::Logger * log = &Poco::Logger::get("TLDListsHolder"); + LoggerPtr log = getLogger("TLDListsHolder"); for (const auto & key : config_keys) { diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 621d47d205..376f153937 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -177,7 +177,7 @@ ReturnType ThreadPoolImpl::scheduleImpl(Job job, int priority, std::opti { auto tid = threads.front().gettid(); cpu_set->addTask(tid); - LOG_DEBUG(&Poco::Logger::get("ThreadPool"), "add thread : {}", tid); + LOG_DEBUG(getLogger("ThreadPool"), "add thread : {}", tid); } } } @@ -190,7 +190,7 @@ ReturnType ThreadPoolImpl::scheduleImpl(Job job, int priority, std::opti { DB::CpuSetPtr system_cpu_set = cgroup_manager.getSystemCpuSet(); system_cpu_set->addTask(threads.front().gettid()); - LOG_DEBUG(&Poco::Logger::get("ThreadPool"), "clear thread for exception : {}", threads.front().gettid()); + LOG_DEBUG(getLogger("ThreadPool"), "clear thread for exception : {}", threads.front().gettid()); } } @@ -288,7 +288,7 @@ void ThreadPoolImpl::finalize() DB::CpuSetPtr system_cpu_set = cgroup_manager.getSystemCpuSet(); system_cpu_set->addTasks(tids); } - LOG_DEBUG(&Poco::Logger::get("ThreadPool"), "clear thread for finalize : {}", ss.str()); + LOG_DEBUG(getLogger("ThreadPool"), "clear thread for finalize : {}", ss.str()); } threads.clear(); @@ -320,7 +320,7 @@ void ThreadPoolImpl::worker(typename std::list::iterator thread_ { auto tid = DB::SystemUtils::gettid(); cpu->addTask(tid); - LOG_DEBUG(&Poco::Logger::get("ThreadPool"), "add thread : {}", tid); + LOG_DEBUG(getLogger("ThreadPool"), "add thread : {}", tid); } bool job_is_done = false; @@ -367,7 +367,7 @@ void ThreadPoolImpl::worker(typename std::list::iterator thread_ 
{ DB::CpuSetPtr system_cpu_set = cgroup_manager.getSystemCpuSet(); system_cpu_set->addTask(thread_it->gettid()); - LOG_DEBUG(&Poco::Logger::get("ThreadPool"), "clear thread for max_threads : {}", thread_it->gettid()); + LOG_DEBUG(getLogger("ThreadPool"), "clear thread for max_threads : {}", thread_it->gettid()); } } diff --git a/src/Common/ThreadPoolTaskTracker.cpp b/src/Common/ThreadPoolTaskTracker.cpp index 12fe9c55df..a2676f12e9 100644 --- a/src/Common/ThreadPoolTaskTracker.cpp +++ b/src/Common/ThreadPoolTaskTracker.cpp @@ -138,7 +138,7 @@ void TaskTracker::waitTilInflightShrink() return; if (futures.size() >= max_tasks_inflight) - LOG_DEBUG(&Poco::Logger::get("TaskTracker"), "have to wait some tasks finish, in queue {}, limit {}", futures.size(), max_tasks_inflight); + LOG_DEBUG(getLogger("TaskTracker"), "have to wait some tasks finish, in queue {}, limit {}", futures.size(), max_tasks_inflight); Stopwatch watch; diff --git a/src/Common/ThreadProfileEvents.cpp b/src/Common/ThreadProfileEvents.cpp index 7b69bf766d..860ec15993 100644 --- a/src/Common/ThreadProfileEvents.cpp +++ b/src/Common/ThreadProfileEvents.cpp @@ -247,7 +247,7 @@ static void enablePerfEvent(int event_fd) { if (ioctl(event_fd, PERF_EVENT_IOC_ENABLE, 0)) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't enable perf event with file descriptor {}: '{}' ({})", event_fd, errnoToString(errno), errno); } @@ -257,7 +257,7 @@ static void disablePerfEvent(int event_fd) { if (ioctl(event_fd, PERF_EVENT_IOC_DISABLE, 0)) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't disable perf event with file descriptor {}: '{}' ({})", event_fd, errnoToString(errno), errno); } @@ -267,7 +267,7 @@ static void releasePerfEvent(int event_fd) { if (close(event_fd)) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't close perf event file descriptor {}: {} ({})", event_fd, 
errnoToString(errno), errno); } @@ -280,12 +280,12 @@ static bool validatePerfEventDescriptor(int & fd) if (errno == EBADF) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Event descriptor {} was closed from the outside; reopening", fd); } else { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Error while checking availability of event descriptor {}: {} ({})", fd, errnoToString(errno), errno); @@ -363,7 +363,7 @@ bool PerfEventsCounters::processThreadLocalChanges(const std::string & needed_ev bool has_cap_sys_admin = hasLinuxCapability(CAP_SYS_ADMIN); if (perf_event_paranoid >= 3 && !has_cap_sys_admin) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Not enough permissions to record perf events: " "perf_event_paranoid = {} and CAP_SYS_ADMIN = 0", perf_event_paranoid); @@ -391,7 +391,7 @@ bool PerfEventsCounters::processThreadLocalChanges(const std::string & needed_ev // ENOENT means that the event is not supported. Don't log it, because // this is called for each thread and would be too verbose. Log other // error codes because they might signify an error. 
- LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Failed to open perf event {} (event_type={}, event_config={}): " "'{}' ({})", event_info.settings_name, event_info.event_type, event_info.event_config, errnoToString(errno), errno); @@ -431,7 +431,7 @@ std::vector PerfEventsCounters::eventIndicesFromString(const std::string } else { - LOG_ERROR(&Poco::Logger::get("PerfEvents"), + LOG_ERROR(getLogger("PerfEvents"), "Unknown perf event name '{}' specified in settings", event_name); } } @@ -478,7 +478,7 @@ void PerfEventsCounters::finalizeProfileEvents(ProfileEvents::Counters & profile if (bytes_read != bytes_to_read) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't read event value from file descriptor {}: '{}' ({})", fd, errnoToString(errno), errno); current_values[i] = {}; diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index 775bd19893..64547b40c9 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -82,7 +82,7 @@ ThreadStatus::ThreadStatus() last_rusage = std::make_unique(); memory_tracker.setDescription("(for thread)"); - log = &Poco::Logger::get("ThreadStatus"); + log = getLogger("ThreadStatus"); current_thread = this; diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 744a34da4c..822edae70e 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -221,7 +222,7 @@ protected: std::unique_ptr query_profiler_real; std::unique_ptr query_profiler_cpu; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; friend class CurrentThread; diff --git a/src/Common/TraceCollector.cpp b/src/Common/TraceCollector.cpp index 1c6074c5cf..c7cfe3cfb0 100644 --- a/src/Common/TraceCollector.cpp +++ b/src/Common/TraceCollector.cpp @@ -52,7 +52,7 @@ TraceCollector::TraceCollector(std::shared_ptr trace_log_) TraceCollector::~TraceCollector() { if 
(!thread.joinable()) - LOG_ERROR(&Poco::Logger::get("TraceCollector"), "TraceCollector thread is malformed and cannot be joined"); + LOG_ERROR(getLogger("TraceCollector"), "TraceCollector thread is malformed and cannot be joined"); else stop(); diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index cf0bcbaf70..c25f1775a2 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -69,7 +69,7 @@ static void check(Coordination::Error code, const std::string & path) void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_, const std::string & identity_, int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_) { - log = &Poco::Logger::get("ZooKeeper"); + log = getLogger("ZooKeeper"); hosts = hosts_; identity = identity_; session_timeout_ms = session_timeout_ms_; @@ -244,7 +244,7 @@ struct ZooKeeperArgs } else if (!endpoints.empty()) { - LOG_WARNING(&Poco::Logger::get("Zookeeper"), "Get Zookeeper node from config and service_discovery. Will use the first one"); + LOG_WARNING(getLogger("Zookeeper"), "Get Zookeeper node from config and service_discovery. 
Will use the first one"); } if (!chroot.empty()) diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 0234fba239..4f9c41ae49 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -21,6 +21,7 @@ #pragma once +#include #include "Types.h" #include #include @@ -344,7 +345,7 @@ private: std::mutex mutex; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; std::shared_ptr zk_log; AtomicStopwatch session_uptime; diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 14abd23703..59abf27e2c 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -1080,7 +1080,7 @@ void ZooKeeper::initApiVersion() promise->set_value(response); }; - auto * log = &Poco::Logger::get("ZooKeeperClient"); + auto log = getLogger("ZooKeeperClient"); get(keeper_api_version_path, std::move(callback), {}); if (future.wait_for(std::chrono::milliseconds(operation_timeout.totalMilliseconds())) != std::future_status::ready) diff --git a/src/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp index d5ccf05641..cb39644e35 100644 --- a/src/Common/parseRemoteDescription.cpp +++ b/src/Common/parseRemoteDescription.cpp @@ -184,7 +184,7 @@ std::vector> parseRemoteDescriptionForExternalDataba size_t colon = address.find(':'); if (colon == String::npos) { - LOG_WARNING(&Poco::Logger::get("ParseRemoteDescription"), "Port is not found for host: {}. Using default port {}", address, default_port); + LOG_WARNING(getLogger("ParseRemoteDescription"), "Port is not found for host: {}. 
Using default port {}", address, default_port); result.emplace_back(removeBracketsIfIpv6(address), default_port); } else diff --git a/src/Common/serverLocality.cpp b/src/Common/serverLocality.cpp index 393809b39b..fed99dae4b 100644 --- a/src/Common/serverLocality.cpp +++ b/src/Common/serverLocality.cpp @@ -27,7 +27,7 @@ bool isLocalServer(const std::string & target, const std::string & port) const size_t pos = target.find_last_of(':'); if (std::string::npos == pos) { - LOG_ERROR(&Poco::Logger::get(__PRETTY_FUNCTION__), + LOG_ERROR(getLogger(__PRETTY_FUNCTION__), "Parse isLocalServer failed because cannot find colon in address {}", target); return false; } diff --git a/src/Common/tests/gtest_log.cpp b/src/Common/tests/gtest_log.cpp index 9f4ef41f64..648287ad2b 100644 --- a/src/Common/tests/gtest_log.cpp +++ b/src/Common/tests/gtest_log.cpp @@ -12,7 +12,7 @@ TEST(Logger, Log) { Poco::Logger::root().setLevel("none"); Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); - Poco::Logger * log = &Poco::Logger::get("Log"); + LoggerPtr log = getLogger("Log"); /// This test checks that we don't pass this string to fmtlib, because it is the only argument. 
EXPECT_NO_THROW(LOG_INFO(log, "Hello {} World")); diff --git a/src/Coordination/Changelog.cpp b/src/Coordination/Changelog.cpp index 51e4fec5bd..52e4d5d688 100644 --- a/src/Coordination/Changelog.cpp +++ b/src/Coordination/Changelog.cpp @@ -203,7 +203,7 @@ public: } /// start_log_index -- all entries with index < start_log_index will be skipped, but accounted into total_entries_read_from_log - ChangelogReadResult readChangelog(IndexToLogEntry & logs, uint64_t start_log_index, Poco::Logger * log) + ChangelogReadResult readChangelog(IndexToLogEntry & logs, uint64_t start_log_index, LoggerPtr log) { ChangelogReadResult result{}; try @@ -296,7 +296,7 @@ Changelog::Changelog( const std::string & changelogs_dir_, uint64_t rotate_interval_, bool force_sync_, - Poco::Logger * log_, + LoggerPtr log_, bool compress_logs_) : changelogs_dir(changelogs_dir_) , rotate_interval(rotate_interval_) diff --git a/src/Coordination/Changelog.h b/src/Coordination/Changelog.h index 08710a7e24..0506c9e29d 100644 --- a/src/Coordination/Changelog.h +++ b/src/Coordination/Changelog.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -94,7 +95,7 @@ class Changelog public: Changelog(const std::string & changelogs_dir_, uint64_t rotate_interval_, - bool force_sync_, Poco::Logger * log_, bool compress_logs_ = true); + bool force_sync_, LoggerPtr log_, bool compress_logs_ = true); /// Read changelog from files on changelogs_dir_ skipping all entries before from_log_index /// Truncate broken entries, remove files after broken entries. 
@@ -165,7 +166,7 @@ private: const std::string changelogs_dir; const uint64_t rotate_interval; const bool force_sync; - Poco::Logger * log; + LoggerPtr log; bool compress_logs; /// Currently existing changelogs diff --git a/src/Coordination/FourLetterCommand.cpp b/src/Coordination/FourLetterCommand.cpp index 3a9d49b4df..04540da70d 100644 --- a/src/Coordination/FourLetterCommand.cpp +++ b/src/Coordination/FourLetterCommand.cpp @@ -202,7 +202,7 @@ void FourLetterCommandFactory::initializeAllowList(KeeperDispatcher & keeper_dis } else { - auto * log = &Poco::Logger::get("FourLetterCommandFactory"); + auto log = getLogger("FourLetterCommandFactory"); LOG_WARNING(log, "Find invalid keeper 4lw command {} when initializing, ignore it.", token); } } diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 61c4d16641..3bf89a2738 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -40,7 +40,7 @@ namespace ErrorCodes KeeperDispatcher::KeeperDispatcher() : responses_queue(std::numeric_limits::max()) , configuration_and_settings(std::make_shared()) - , log(&Poco::Logger::get("KeeperDispatcher")) + , log(getLogger("KeeperDispatcher")) { } diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index f733854d5b..54012391e2 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -22,6 +22,7 @@ #if USE_NURAFT +#include #include #include #include @@ -90,7 +91,7 @@ private: KeeperConfigurationAndSettingsPtr configuration_and_settings; - Poco::Logger * log; + LoggerPtr log; /// Counter for new session_id requests. 
std::atomic internal_session_id_counter{0}; diff --git a/src/Coordination/KeeperLogStore.cpp b/src/Coordination/KeeperLogStore.cpp index b3835ed127..34fc5e4919 100644 --- a/src/Coordination/KeeperLogStore.cpp +++ b/src/Coordination/KeeperLogStore.cpp @@ -26,7 +26,7 @@ namespace DB { KeeperLogStore::KeeperLogStore(const std::string & changelogs_path, uint64_t rotate_interval_, bool force_sync_, bool compress_logs_) - : log(&Poco::Logger::get("KeeperLogStore")) + : log(getLogger("KeeperLogStore")) , changelog(changelogs_path, rotate_interval_, force_sync_, log, compress_logs_) { if (force_sync_) diff --git a/src/Coordination/KeeperLogStore.h b/src/Coordination/KeeperLogStore.h index 2954b1d562..3433eed973 100644 --- a/src/Coordination/KeeperLogStore.h +++ b/src/Coordination/KeeperLogStore.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include // Y_IGNORE #include #include @@ -84,7 +85,7 @@ public: private: mutable std::mutex changelog_lock; - Poco::Logger * log; + LoggerPtr log; Changelog changelog; }; diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 430143c0df..e39eed1f2a 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -102,7 +102,7 @@ std::string checkAndGetSuperdigest(const String & user_and_digest) return user_and_digest; } -int32_t getValueOrMaxInt32AndLogWarning(uint64_t value, const std::string & name, Poco::Logger * log) +int32_t getValueOrMaxInt32AndLogWarning(uint64_t value, const std::string & name, LoggerPtr log) { if (value > std::numeric_limits::max()) { @@ -127,7 +127,7 @@ KeeperServer::KeeperServer( SnapshotsQueue & snapshots_queue_) : server_id(configuration_and_settings_->server_id) , coordination_settings(configuration_and_settings_->coordination_settings) - , log(&Poco::Logger::get("KeeperServer")) + , log(getLogger("KeeperServer")) , is_recovering(config.has("keeper_server.force_recovery") && config.getBool("keeper_server.force_recovery")) , 
keeper_context{std::make_shared()} { diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 951301e159..242cfe2829 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -70,7 +71,7 @@ private: nuraft::ptr last_local_config; - Poco::Logger * log; + LoggerPtr log; /// Callback func which is called by NuRaft on all internal events. /// Used to determine the moment when raft is ready to server new requests diff --git a/src/Coordination/KeeperSnapshotManager.cpp b/src/Coordination/KeeperSnapshotManager.cpp index cec69a7a55..be5df5de12 100644 --- a/src/Coordination/KeeperSnapshotManager.cpp +++ b/src/Coordination/KeeperSnapshotManager.cpp @@ -389,7 +389,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial { if (keeper_context->ignore_system_path_on_startup || keeper_context->server_state != KeeperContext::Phase::INIT) { - LOG_ERROR(&Poco::Logger::get("KeeperSnapshotManager"), "{}. Ignoring it", error_msg); + LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", error_msg); continue; } else @@ -403,7 +403,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial { if (keeper_context->ignore_system_path_on_startup || keeper_context->server_state != KeeperContext::Phase::INIT) { - LOG_ERROR(&Poco::Logger::get("KeeperSnapshotManager"), "{}. Ignoring it", error_msg); + LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. 
Ignoring it", error_msg); node = KeeperStorage::Node{}; } else diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index f2b1a39d24..2108815b63 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -60,7 +60,7 @@ KeeperStateMachine::KeeperStateMachine( , responses_queue(responses_queue_) , snapshots_queue(snapshots_queue_) , last_committed_idx(0) - , log(&Poco::Logger::get("KeeperStateMachine")) + , log(getLogger("KeeperStateMachine")) , superdigest(superdigest_) , keeper_context(keeper_context_) { @@ -130,7 +130,7 @@ void assertDigest( if (!KeeperStorage::checkDigest(first, second)) { LOG_FATAL( - &Poco::Logger::get("KeeperStateMachine"), + getLogger("KeeperStateMachine"), "Digest for nodes is not matching after {} request of type '{}'.\nExpected digest - {}, actual digest - {} (digest version " "{}). Keeper will " "terminate to avoid inconsistencies.\nExtra information about the request:\n{}", @@ -426,7 +426,7 @@ void KeeperStateMachine::save_logical_snp_obj( } } -static int bufferFromFile(Poco::Logger * log, const std::string & path, nuraft::ptr & data_out) +static int bufferFromFile(LoggerPtr log, const std::string & path, nuraft::ptr & data_out) { if (path.empty() || !std::filesystem::exists(path)) { diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h index b8430e4497..c453e66cb1 100644 --- a/src/Coordination/KeeperStateMachine.h +++ b/src/Coordination/KeeperStateMachine.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -152,7 +153,7 @@ private: /// Last committed Raft log number. std::atomic last_committed_idx; - Poco::Logger * log; + LoggerPtr log; /// Cluster config for our quorum. 
/// It's a copy of config stored in StateManager, but here diff --git a/src/Coordination/KeeperStateManager.cpp b/src/Coordination/KeeperStateManager.cpp index fbc7057746..7e5815457c 100644 --- a/src/Coordination/KeeperStateManager.cpp +++ b/src/Coordination/KeeperStateManager.cpp @@ -162,7 +162,7 @@ KeeperStateManager::KeeperStateManager( , secure(false) , log_store(nuraft::cs_new(logs_path, 5000, false, false)) , server_state_path(state_file_path) - , logger(&Poco::Logger::get("KeeperStateManager")) + , logger(getLogger("KeeperStateManager")) { auto peer_config = nuraft::cs_new(my_server_id, host + ":" + std::to_string(port)); configuration_wrapper.cluster_config = nuraft::cs_new(); @@ -188,7 +188,7 @@ KeeperStateManager::KeeperStateManager( coordination_settings->force_sync, coordination_settings->compress_logs)) , server_state_path(state_file_path) - , logger(&Poco::Logger::get("KeeperStateManager")) + , logger(getLogger("KeeperStateManager")) { } diff --git a/src/Coordination/KeeperStateManager.h b/src/Coordination/KeeperStateManager.h index 61c203247a..12dc1360ef 100644 --- a/src/Coordination/KeeperStateManager.h +++ b/src/Coordination/KeeperStateManager.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -173,7 +174,7 @@ private: const std::filesystem::path server_state_path; - Poco::Logger * logger; + LoggerPtr logger; public: /// Parse configuration from xml config. diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 3d7eadef42..3a8a3eee4c 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -508,7 +508,7 @@ namespace [[noreturn]] void onStorageInconsistency() { LOG_ERROR( - &Poco::Logger::get("KeeperStorage"), + getLogger("KeeperStorage"), "Inconsistency found between uncommitted and committed data. 
Keeper will terminate to avoid undefined behaviour."); std::terminate(); } @@ -763,7 +763,7 @@ void handleSystemNodeModification(const KeeperContext & keeper_context, std::str "If you still want to ignore it, you can set 'keeper_server.ignore_system_path_on_startup' to true.", error_msg); - LOG_ERROR(&Poco::Logger::get("KeeperStorage"), std::string(error_msg)); + LOG_ERROR(getLogger("KeeperStorage"), std::string(error_msg)); } } diff --git a/src/Coordination/LeaderElection.h b/src/Coordination/LeaderElection.h index 1d6cf8352a..4422b23d51 100644 --- a/src/Coordination/LeaderElection.h +++ b/src/Coordination/LeaderElection.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -51,7 +52,7 @@ public: , identifier(allow_multiple_leaders_ ? (identifier_ + suffix) : identifier_) , allow_multiple_leaders(allow_multiple_leaders_) , log_name("LeaderElection (" + path + ")") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) { task = pool.createTask(log_name, [this] { threadFunction(); }); createNode(); @@ -81,7 +82,7 @@ private: std::string identifier; bool allow_multiple_leaders; std::string log_name; - Poco::Logger * log; + LoggerPtr log; EphemeralNodeHolderPtr node; std::string node_name; diff --git a/src/Coordination/LoggerWrapper.h b/src/Coordination/LoggerWrapper.h index 25a1969d2e..2f8ed4599f 100644 --- a/src/Coordination/LoggerWrapper.h +++ b/src/Coordination/LoggerWrapper.h @@ -1,5 +1,6 @@ #pragma once +#include #include // Y_IGNORE #include #include @@ -25,7 +26,7 @@ private: public: LoggerWrapper(const std::string & name, LogsLevel level_) - : log(&Poco::Logger::get(name)) + : log(getRawLogger(name)) , level(level_) { log->setLevel(static_cast(LEVELS.at(level))); @@ -56,7 +57,8 @@ public: } private: - Poco::Logger * log; + /// stick to raw logger to support setLevel + LoggerRawPtr log; std::atomic level; }; diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index aa20517398..5ca18f60cf 
100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -110,7 +110,7 @@ void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in) } } -int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * log) +int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, LoggerPtr log) { int64_t max_zxid = 0; std::string path; @@ -167,7 +167,7 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, Poco::L return max_zxid; } -void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log) +void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, LoggerPtr log) { LOG_INFO(log, "Deserializing storage snapshot {}", snapshot_path); int64_t zxid = getZxidFromName(snapshot_path); @@ -206,7 +206,7 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st LOG_INFO(log, "Finished, snapshot ZXID {}", storage.zxid); } -void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log) +void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, LoggerPtr log) { namespace fs = std::filesystem; std::map existing_snapshots; @@ -491,7 +491,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request) } -bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*log*/) +bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, LoggerPtr /*log*/) { int64_t checksum; Coordination::read(checksum, in); @@ -546,7 +546,7 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*l return true; } -void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log) +void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, LoggerPtr 
log) { ReadBufferFromFile reader(log_path); @@ -570,7 +570,7 @@ void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string LOG_INFO(log, "Finished {} deserialization, totally read {} records", log_path, counter); } -void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log) +void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, LoggerPtr log) { namespace fs = std::filesystem; std::map existing_logs; diff --git a/src/Coordination/ZooKeeperDataReader.h b/src/Coordination/ZooKeeperDataReader.h index 5f26457c11..c9846bbb92 100644 --- a/src/Coordination/ZooKeeperDataReader.h +++ b/src/Coordination/ZooKeeperDataReader.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -6,12 +7,12 @@ namespace DB { -void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log); +void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, LoggerPtr log); -void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log); +void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, LoggerPtr log); -void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log); +void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, LoggerPtr log); -void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log); +void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, LoggerPtr log); } diff --git a/src/Core/BackgroundSchedulePool.cpp b/src/Core/BackgroundSchedulePool.cpp index 3a3e12ed4f..b19b44436c 100644 --- a/src/Core/BackgroundSchedulePool.cpp +++ b/src/Core/BackgroundSchedulePool.cpp @@ -131,7 +131,7 @@ void 
BackgroundSchedulePoolTaskInfo::execute() static const int32_t slow_execution_threshold_ms = 200; if (milliseconds >= slow_execution_threshold_ms) - LOG_TRACE(&Poco::Logger::get(log_name), "Execution took {} ms.", milliseconds); + LOG_TRACE(getLogger(log_name), "Execution took {} ms.", milliseconds); { std::lock_guard lock_schedule(schedule_mutex); @@ -185,7 +185,7 @@ BackgroundSchedulePool::BackgroundSchedulePool(size_t size_, CurrentMetrics::Met , thread_name(thread_name_) , cpu_set(std::move(cpu_set_)) { - LOG_INFO(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), "Create BackgroundSchedulePool with {} threads", size); + LOG_INFO(getLogger("BackgroundSchedulePool/" + thread_name), "Create BackgroundSchedulePool with {} threads", size); threads.reserve(size); for (size_t i = 0; i < size; ++i) @@ -195,7 +195,7 @@ BackgroundSchedulePool::BackgroundSchedulePool(size_t size_, CurrentMetrics::Met { size_t tid = thread.gettid(); if (tid == 0) - LOG_WARNING(&Poco::Logger::get("BackgroundSchedulePool"), "get tid: 0"); + LOG_WARNING(getLogger("BackgroundSchedulePool"), "get tid: 0"); else cpu_set->addTask(tid); } @@ -219,7 +219,7 @@ BackgroundSchedulePool::~BackgroundSchedulePool() queue.wakeUpAll(); delayed_thread.join(); - LOG_TRACE(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), "Waiting for threads to finish."); + LOG_TRACE(getLogger("BackgroundSchedulePool/" + thread_name), "Waiting for threads to finish."); std::vector tids; for (auto & thread : threads) diff --git a/src/Core/BaseSettings.cpp b/src/Core/BaseSettings.cpp index ff2ddf81cf..60452e1f39 100644 --- a/src/Core/BaseSettings.cpp +++ b/src/Core/BaseSettings.cpp @@ -47,7 +47,7 @@ void BaseSettingsHelpers::throwSettingNotFound(const std::string_view & name) void BaseSettingsHelpers::warningSettingNotFound(const std::string_view & name) { - static auto * log = &Poco::Logger::get("Settings"); + static auto log = getLogger("Settings"); LOG_WARNING(log, "Unknown setting {}, skipping", name); 
} diff --git a/src/Core/MySQL/Authentication.cpp b/src/Core/MySQL/Authentication.cpp index 15e305a545..bafdb0a072 100644 --- a/src/Core/MySQL/Authentication.cpp +++ b/src/Core/MySQL/Authentication.cpp @@ -119,8 +119,8 @@ void Native41::authenticate( #if USE_SSL -Sha256Password::Sha256Password(RSA & public_key_, RSA & private_key_, Poco::Logger * log_) - : public_key(public_key_), private_key(private_key_), log(log_) +Sha256Password::Sha256Password(RSA & public_key_, RSA & private_key_, const String & log_name_) + : public_key(public_key_), private_key(private_key_), log(getLogger(log_name_)) { /** Native authentication sent 20 bytes + '\0' character = 21 bytes. * This plugin must do the same to stay consistent with historical behavior if it is set to operate as a default plugin. [1] diff --git a/src/Core/MySQL/Authentication.h b/src/Core/MySQL/Authentication.h index 4bc3172c2d..ab3e39db86 100644 --- a/src/Core/MySQL/Authentication.h +++ b/src/Core/MySQL/Authentication.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -62,7 +63,7 @@ private: class Sha256Password : public IPlugin { public: - Sha256Password(RSA & public_key_, RSA & private_key_, Poco::Logger * log_); + Sha256Password(RSA & public_key_, RSA & private_key_, const String & log_name_); String getName() override { return "sha256_password"; } @@ -75,7 +76,7 @@ public: private: RSA & public_key; RSA & private_key; - Poco::Logger * log; + LoggerPtr log; String scramble; }; #endif diff --git a/src/Core/PostgreSQL/Connection.cpp b/src/Core/PostgreSQL/Connection.cpp index c423d75981..8c73a65b43 100644 --- a/src/Core/PostgreSQL/Connection.cpp +++ b/src/Core/PostgreSQL/Connection.cpp @@ -6,7 +6,7 @@ namespace postgres Connection::Connection(const ConnectionInfo & connection_info_, bool replication_, size_t num_tries_) : connection_info(connection_info_), replication(replication_), num_tries(num_tries_) - , log(&Poco::Logger::get("PostgreSQLReplicaConnection")) + , 
log(getLogger("PostgreSQLReplicaConnection")) { if (replication) { @@ -63,7 +63,7 @@ void Connection::updateConnection() connection = std::make_unique(connection_info.first); if (replication) connection->set_variable("default_transaction_isolation", "'repeatable read'"); - LOG_DEBUG(&Poco::Logger::get("PostgreSQLConnection"), "New connection to {}", connection_info.second); + LOG_DEBUG(getLogger("PostgreSQLConnection"), "New connection to {}", connection_info.second); } void Connection::connect() diff --git a/src/Core/PostgreSQL/Connection.h b/src/Core/PostgreSQL/Connection.h index e01de419c1..f62037a1e9 100644 --- a/src/Core/PostgreSQL/Connection.h +++ b/src/Core/PostgreSQL/Connection.h @@ -1,5 +1,6 @@ #pragma once +#include #include // Y_IGNORE #include #include @@ -42,6 +43,6 @@ private: bool replication; size_t num_tries; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Core/PostgreSQL/PoolWithFailover.cpp b/src/Core/PostgreSQL/PoolWithFailover.cpp index 6bf756b8a1..1c946a399f 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.cpp +++ b/src/Core/PostgreSQL/PoolWithFailover.cpp @@ -20,7 +20,7 @@ PoolWithFailover::PoolWithFailover( : pool_wait_timeout(pool_wait_timeout_) , max_tries(max_tries_) { - LOG_TRACE(&Poco::Logger::get("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", + LOG_TRACE(getLogger("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", pool_size, pool_wait_timeout, max_tries_); auto db = config.getString(config_prefix + ".db", ""); @@ -66,13 +66,13 @@ PoolWithFailover::PoolWithFailover( : pool_wait_timeout(pool_wait_timeout_) , max_tries(max_tries_) { - LOG_TRACE(&Poco::Logger::get("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", + LOG_TRACE(getLogger("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection 
wait timeout: {}, max failover tries: {}", pool_size, pool_wait_timeout, max_tries_); /// Replicas have the same priority, but traversed replicas are moved to the end of the queue. for (const auto & [host, port] : addresses) { - LOG_DEBUG(&Poco::Logger::get("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port); + LOG_DEBUG(getLogger("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port); auto connection_string = formatConnectionString(database, host, port, user, password).first; replicas_with_priority[0].emplace_back(connection_string, pool_size); } diff --git a/src/Core/PostgreSQL/PoolWithFailover.h b/src/Core/PostgreSQL/PoolWithFailover.h index f4ae2c6cd1..5d580b6771 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.h +++ b/src/Core/PostgreSQL/PoolWithFailover.h @@ -1,5 +1,6 @@ #pragma once +#include #include "ConnectionHolder.h" #include #include @@ -57,7 +58,7 @@ private: size_t pool_wait_timeout; size_t max_tries; std::mutex mutex; - Poco::Logger * log = &Poco::Logger::get("PostgreSQLConnectionPool"); + LoggerPtr log = getLogger("PostgreSQLConnectionPool"); }; using PoolWithFailoverPtr = std::shared_ptr; diff --git a/src/Core/PostgreSQLProtocol.h b/src/Core/PostgreSQLProtocol.h index 114abc0101..288bf34dca 100644 --- a/src/Core/PostgreSQLProtocol.h +++ b/src/Core/PostgreSQLProtocol.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -883,7 +884,7 @@ public: class AuthenticationManager { private: - Poco::Logger * log = &Poco::Logger::get("AuthenticationManager"); + LoggerPtr log = getLogger("AuthenticationManager"); std::unordered_map> type_to_method = {}; public: diff --git a/src/Core/ServerUUID.cpp b/src/Core/ServerUUID.cpp index 721c406ff5..2bb7cd3bfe 100644 --- a/src/Core/ServerUUID.cpp +++ b/src/Core/ServerUUID.cpp @@ -13,7 +13,7 @@ namespace ErrorCodes extern const int CANNOT_CREATE_FILE; } -void ServerUUID::load(const fs::path & server_uuid_file, 
Poco::Logger * log) +void ServerUUID::load(const fs::path & server_uuid_file, LoggerPtr log) { /// Write a uuid file containing a unique uuid if the file doesn't already exist during server start. diff --git a/src/Core/ServerUUID.h b/src/Core/ServerUUID.h index 36bbf0e631..e779c435a9 100644 --- a/src/Core/ServerUUID.h +++ b/src/Core/ServerUUID.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include @@ -20,7 +21,7 @@ public: static UUID get() { return server_uuid; } /// Loads server UUID from file or creates new one. Should be called on daemon startup. - static void load(const fs::path & server_uuid_file, Poco::Logger * log); + static void load(const fs::path & server_uuid_file, LoggerPtr log); }; } diff --git a/src/Core/SettingsQuirks.cpp b/src/Core/SettingsQuirks.cpp index f969c42680..23d0be92b1 100644 --- a/src/Core/SettingsQuirks.cpp +++ b/src/Core/SettingsQuirks.cpp @@ -13,7 +13,7 @@ /// /// [1]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=339ddb53d373 /// [2]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0c54a6a44bf3 -bool nestedEpollWorks(Poco::Logger * log) +bool nestedEpollWorks(LoggerPtr log) { if (Poco::Environment::os() != POCO_OS_LINUX) return true; @@ -35,7 +35,7 @@ namespace DB { /// Update some settings defaults to avoid some known issues. -void applySettingsQuirks(Settings & settings, Poco::Logger * log) +void applySettingsQuirks(Settings & settings, LoggerPtr log) { if (!nestedEpollWorks(log)) { diff --git a/src/Core/SettingsQuirks.h b/src/Core/SettingsQuirks.h index 38def8eebf..f6b2a4e33f 100644 --- a/src/Core/SettingsQuirks.h +++ b/src/Core/SettingsQuirks.h @@ -1,9 +1,6 @@ #pragma once -namespace Poco -{ -class Logger; -} +#include namespace DB { @@ -11,6 +8,6 @@ namespace DB struct Settings; /// Update some settings defaults to avoid some known issues. 
-void applySettingsQuirks(Settings & settings, Poco::Logger * log = nullptr); +void applySettingsQuirks(Settings & settings, LoggerPtr log = nullptr); } diff --git a/src/DaemonManager/BGJobStatusInCatalog.cpp b/src/DaemonManager/BGJobStatusInCatalog.cpp index f0a5b0fcb7..fb7bf11483 100644 --- a/src/DaemonManager/BGJobStatusInCatalog.cpp +++ b/src/DaemonManager/BGJobStatusInCatalog.cpp @@ -62,8 +62,8 @@ char serializeToChar(CnchBGThreadStatus status) CatalogBGJobStatusPersistentStoreProxy::CatalogBGJobStatusPersistentStoreProxy( std::shared_ptr catalog_, CnchBGThreadType type_, - Poco::Logger * log_) - : catalog{std::move(catalog_)}, statuses_cache{}, type{type_}, log{log_} + LoggerPtr log_) + : catalog{std::move(catalog_)}, type{type_}, log{std::move(log_)} {} std::optional CatalogBGJobStatusPersistentStoreProxy::createStatusIfNotExist(const UUID & uuid, CnchBGThreadStatus init_status) const @@ -126,7 +126,7 @@ IBGJobStatusPersistentStoreProxy::CacheClearer CatalogBGJobStatusPersistentStore if (catalog) // catalog is nullptr in unittest statuses_cache = catalog->getBGJobStatuses(type); - + is_cache_prefetched = true; return CacheClearer{this}; } diff --git a/src/DaemonManager/BGJobStatusInCatalog.h b/src/DaemonManager/BGJobStatusInCatalog.h index 61f7690e49..1daad8fe1e 100644 --- a/src/DaemonManager/BGJobStatusInCatalog.h +++ b/src/DaemonManager/BGJobStatusInCatalog.h @@ -13,6 +13,7 @@ * limitations under the License. 
*/ +#include #include #include #include @@ -62,7 +63,7 @@ namespace BGJobStatusInCatalog class CatalogBGJobStatusPersistentStoreProxy : public IBGJobStatusPersistentStoreProxy { public: - CatalogBGJobStatusPersistentStoreProxy(std::shared_ptr, CnchBGThreadType, Poco::Logger *log); + CatalogBGJobStatusPersistentStoreProxy(std::shared_ptr, CnchBGThreadType, LoggerPtr); std::optional createStatusIfNotExist(const UUID & uuid, CnchBGThreadStatus init_status) const override; void setStatus(const UUID & table_uuid, CnchBGThreadStatus status) const override; @@ -71,11 +72,11 @@ namespace BGJobStatusInCatalog protected: void clearCache() override; /// keep member variables protected for testing - std::shared_ptr catalog; std::unordered_map statuses_cache; + std::shared_ptr catalog; bool is_cache_prefetched = false; CnchBGThreadType type; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/DaemonManager/BackgroudJobExecutor.cpp b/src/DaemonManager/BackgroudJobExecutor.cpp index e24e5d8a11..1f54165b45 100644 --- a/src/DaemonManager/BackgroudJobExecutor.cpp +++ b/src/DaemonManager/BackgroudJobExecutor.cpp @@ -27,7 +27,7 @@ void executeServerBGThreadAction(const StorageID & storage_id, const String & ho { CnchServerClientPtr server_client = context.getCnchServerClient(host_port); server_client->controlCnchBGThread(storage_id, type, action); - LOG_DEBUG(&Poco::Logger::get(__func__), "Succeed to {} thread for {} on {}", + LOG_DEBUG(getLogger(__func__), "Succeed to {} thread for {} on {}", toString(action), storage_id.getNameForLogs(), host_port); } diff --git a/src/DaemonManager/BackgroundJob.h b/src/DaemonManager/BackgroundJob.h index 017e929a1a..ab5dc3bc72 100644 --- a/src/DaemonManager/BackgroundJob.h +++ b/src/DaemonManager/BackgroundJob.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -168,7 +169,7 @@ private: CnchBGThreadStatus expected_status; String host_port; std::time_t last_start_time = 0; - Poco::Logger * log; + LoggerPtr log; mutable 
std::mutex mutex; }; diff --git a/src/DaemonManager/DaemonHelper.cpp b/src/DaemonManager/DaemonHelper.cpp index 29eaa342ab..c4281e0582 100644 --- a/src/DaemonManager/DaemonHelper.cpp +++ b/src/DaemonManager/DaemonHelper.cpp @@ -66,7 +66,7 @@ std::map updateConfig( return std::move(default_config); } -void printConfig(std::map & config, Poco::Logger * log) +void printConfig(std::map & config, LoggerPtr log) { std::ostringstream oss; std::for_each(config.begin(), config.end(), diff --git a/src/DaemonManager/DaemonHelper.h b/src/DaemonManager/DaemonHelper.h index ecb66cab74..22a07465f8 100644 --- a/src/DaemonManager/DaemonHelper.h +++ b/src/DaemonManager/DaemonHelper.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -26,7 +27,7 @@ namespace DB::DaemonManager return cnch_table != nullptr; } - void printConfig(std::map & config, Poco::Logger * log); + void printConfig(std::map & config, LoggerPtr log); std::map updateConfig( std::map && default_config, diff --git a/src/DaemonManager/DaemonJob.h b/src/DaemonManager/DaemonJob.h index 0363271e33..0acfe3dbb1 100644 --- a/src/DaemonManager/DaemonJob.h +++ b/src/DaemonManager/DaemonJob.h @@ -16,6 +16,7 @@ #pragma once #include +#include #include #include @@ -39,7 +40,7 @@ class DaemonJob : public IDaemonJob, public WithMutableContext { public: DaemonJob(ContextMutablePtr global_context_, CnchBGThreadType type_) - : WithMutableContext(global_context_), type{type_}, log(&Poco::Logger::get(toString(type))) + : WithMutableContext(global_context_), type{type_}, log(getLogger(toString(type))) {} void init() override; @@ -47,13 +48,13 @@ public: void stop() override final; bool suspended() const override { return false; } CnchBGThreadType getType() const { return type; } - Poco::Logger * getLog() { return log; } + LoggerPtr getLog() { return log; } protected: void execute() override final; virtual bool executeImpl() = 0; const CnchBGThreadType type; - Poco::Logger * log; + LoggerPtr log; private: 
BackgroundSchedulePool::TaskHolder task; }; diff --git a/src/DaemonManager/DaemonJobAutoStatistics.h b/src/DaemonManager/DaemonJobAutoStatistics.h index 515922ea8c..dc6a0b814e 100644 --- a/src/DaemonManager/DaemonJobAutoStatistics.h +++ b/src/DaemonManager/DaemonJobAutoStatistics.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -31,7 +32,7 @@ public: static HostWithPortsVec getServerList(const ContextPtr & ctx); private: - Poco::Logger * logger = &Poco::Logger::get("AutoStatsDaemon"); + LoggerPtr logger = getLogger("AutoStatsDaemon"); }; } diff --git a/src/DaemonManager/DaemonJobGlobalGC.cpp b/src/DaemonManager/DaemonJobGlobalGC.cpp index 28c9817c43..8c57d59f94 100644 --- a/src/DaemonManager/DaemonJobGlobalGC.cpp +++ b/src/DaemonManager/DaemonJobGlobalGC.cpp @@ -83,7 +83,7 @@ bool sendToServerForGC( std::vector> & num_of_table_can_send_sorted, const std::vector & server_clients, ToServerForGCSender sender, - Poco::Logger * log) + LoggerPtr log) { LOG_DEBUG(log, "send {} table to server for GC, they are", tables_need_gc.size()); for (size_t i = 0; i < tables_need_gc.size(); ++i) @@ -135,7 +135,7 @@ bool sendToServerForGC( std::vector getServerClients( const Context & context, CnchTopologyMaster & topology_master, - Poco::Logger * log) + LoggerPtr log) { std::vector res; std::list server_topologies = topology_master.getCurrentTopology(); @@ -162,7 +162,7 @@ std::vector getServerClients( std::vector> getNumOfTablesCanSend( const std::vector & clients, - Poco::Logger * log) + LoggerPtr log) { std::vector> res; for (const auto & client : clients) @@ -194,7 +194,7 @@ std::vector> getNumOfTablesCanSend( std::set getDeletingTablesFromServers( const std::vector & clients, - Poco::Logger * log) + LoggerPtr log) { std::set uuids; for (const auto & client : clients) diff --git a/src/DaemonManager/DaemonJobGlobalGC.h b/src/DaemonManager/DaemonJobGlobalGC.h index 72a7d2a253..70a459532b 100644 --- a/src/DaemonManager/DaemonJobGlobalGC.h +++ 
b/src/DaemonManager/DaemonJobGlobalGC.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -56,7 +57,7 @@ bool sendToServerForGC( std::vector> & num_of_table_can_send_sorted, const std::vector & server_clients, ToServerForGCSender sender, - Poco::Logger * log); + LoggerPtr log); std::vector> sortByValue( std::vector> && num_of_table_can_send); diff --git a/src/DaemonManager/DaemonJobServerBGThread.cpp b/src/DaemonManager/DaemonJobServerBGThread.cpp index 54f8efca4e..95009828f6 100644 --- a/src/DaemonManager/DaemonJobServerBGThread.cpp +++ b/src/DaemonManager/DaemonJobServerBGThread.cpp @@ -61,7 +61,7 @@ std::unordered_map getUUIDsFromCatalog(DaemonJobServerBGThread { const Context & context = *daemon_job.getContext(); std::unordered_map ret; - Poco::Logger * log = daemon_job.getLog(); + LoggerPtr log = daemon_job.getLog(); if (daemon_job.getType() == CnchBGThreadType::MaterializedMySQL) { @@ -148,7 +148,7 @@ std::unordered_map getUUIDsFromCatalog(DaemonJobServerBGThread } -const std::vector getServersInTopology(Context & context, Poco::Logger * log) +const std::vector getServersInTopology(Context & context, LoggerPtr log) { std::vector ret; std::shared_ptr topology_master = context.getCnchTopologyMaster(); @@ -193,7 +193,7 @@ bool checkIfServerDied(const std::vector & alive_host_port, const String std::find(alive_host_port.begin(), alive_host_port.end(), host_port)); } -std::map fetchServerStartTimes(Context & context, CnchTopologyMaster & topology_master, Poco::Logger * log) +std::map fetchServerStartTimes(Context & context, CnchTopologyMaster & topology_master, LoggerPtr log) { std::map ret; std::list server_topologies = topology_master.getCurrentTopology(); @@ -257,7 +257,7 @@ std::unordered_map getAllTargetServerForBGJob( UInt64 ts, DaemonJobServerBGThread & daemon_job) { - Poco::Logger * log = daemon_job.getLog(); + LoggerPtr log = daemon_job.getLog(); std::unordered_map ret; for (const auto & p : bg_jobs) { @@ -394,7 +394,7 @@ void syncServerBGJob( 
BackgroundJobPtr & job_from_dm, std::vector jobs_from_server) { - Poco::Logger * log = daemon_job.getLog(); + LoggerPtr log = daemon_job.getLog(); StorageID storage_id = job_from_dm->getStorageID(); String job_from_dm_host_port = job_from_dm->getHostPort(); CnchBGThreadStatus job_from_dm_status = job_from_dm->getJobStatus(); @@ -467,7 +467,7 @@ void runMissingAndRemoveDuplicateJob( BackgroundJobs & check_jobs, const std::unordered_multimap & jobs_from_server) { - Poco::Logger * log = daemon_job.getLog(); + LoggerPtr log = daemon_job.getLog(); std::for_each(check_jobs.begin(), check_jobs.end(), [& jobs_from_server, & log, & daemon_job] (auto & p) { @@ -534,7 +534,7 @@ void removeZombieJobsInServer( const std::vector & zombie_jobs ) { - Poco::Logger * log = daemon_job.getLog(); + LoggerPtr log = daemon_job.getLog(); std::for_each(zombie_jobs.begin(), zombie_jobs.end(), [&] (const BGJobInfoFromServer & j) { LOG_INFO(log, "Will drop zombie thread for job type {}, table {} on host {}", toString(daemon_job.getType()), j.storage_id.getNameForLogs(), j.host_port); @@ -546,7 +546,7 @@ void removeZombieJobsInServer( std::optional> fetchBGThreadFromServer( Context & context, CnchBGThreadType type, - Poco::Logger * log, + LoggerPtr log, const std::vector & servers ) { @@ -594,7 +594,7 @@ size_t checkLivenessIfNeed( ) { const CnchBGThreadType type = daemon_job.getType(); - Poco::Logger * log = daemon_job.getLog(); + LoggerPtr log = daemon_job.getLog(); if ((counter % liveness_check_interval) != 0) return counter + 1; @@ -876,7 +876,7 @@ Result DaemonJobServerBGThread::executeJobAction(const StorageID & storage_id, C { CnchServerClientPtr server_client = context.getCnchServerClient(host_port); server_client->controlCnchBGThread(StorageID::createEmpty(), type, action); - LOG_DEBUG(&Poco::Logger::get(__func__), "Succeed to {} all threads on {}", + LOG_DEBUG(getLogger(__func__), "Succeed to {} all threads on {}", toString(action), host_port); } @@ -1169,7 +1169,7 @@ void 
registerServerBGThreads(DaemonFactory & factory) void fixKafkaActiveStatuses(DaemonJobServerBGThread * daemon_job) { - Poco::Logger * log = daemon_job->getLog(); + LoggerPtr log = daemon_job->getLog(); ContextMutablePtr context = daemon_job->getContext(); std::shared_ptr catalog = context->getCnchCatalog(); auto data_models = catalog->getAllTables(); diff --git a/src/DaemonManager/DaemonJobServerBGThread.h b/src/DaemonManager/DaemonJobServerBGThread.h index d956cc6c56..ee90105f05 100644 --- a/src/DaemonManager/DaemonJobServerBGThread.h +++ b/src/DaemonManager/DaemonJobServerBGThread.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -107,7 +108,7 @@ struct BGJobInfoFromServer using BGJobsFromServersFetcher = std::function>( Context &, CnchBGThreadType, - Poco::Logger *, + LoggerPtr, const std::vector & )>; diff --git a/src/DaemonManager/DaemonJobTxnGC.h b/src/DaemonManager/DaemonJobTxnGC.h index b670b91681..b20a42b2bb 100644 --- a/src/DaemonManager/DaemonJobTxnGC.h +++ b/src/DaemonManager/DaemonJobTxnGC.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -26,7 +27,7 @@ namespace DB::DaemonManager class TxnGCLog { public: - TxnGCLog(Poco::Logger * lg) : log(lg) { } + TxnGCLog(LoggerPtr lg) : log(lg) { } TxnGCLog(const TxnGCLog &) = delete; TxnGCLog & operator=(const TxnGCLog &) = delete; ~TxnGCLog() @@ -59,7 +60,7 @@ public: std::atomic inactive{0}; private: - Poco::Logger * log; + LoggerPtr log; }; class DaemonJobTxnGC : public DaemonJob { diff --git a/src/DaemonManager/DaemonManager.cpp b/src/DaemonManager/DaemonManager.cpp index e24ec6166b..1d9389e9d4 100644 --- a/src/DaemonManager/DaemonManager.cpp +++ b/src/DaemonManager/DaemonManager.cpp @@ -46,8 +46,6 @@ using namespace std::chrono_literals; #define DAEMON_MANAGER_VERSION "1.0.0" -using Poco::Logger; - namespace brpc { namespace policy @@ -142,7 +140,7 @@ void DaemonManager::defineOptions(Poco::Util::OptionSet & options) std::vector createLocalDaemonJobs( const 
Poco::Util::AbstractConfiguration & app_config, ContextMutablePtr global_context, - Logger * log) + LoggerPtr log) { std::map default_config = { { "GLOBAL_GC", 5000}, @@ -171,7 +169,7 @@ std::vector createLocalDaemonJobs( std::unordered_map createDaemonJobsForBGThread( const Poco::Util::AbstractConfiguration & app_config, ContextMutablePtr global_context, - Logger * log) + LoggerPtr log) { std::unordered_map res; std::map default_config = { @@ -221,7 +219,7 @@ int DaemonManager::main(const std::vector &) if (consul_http_host != nullptr && consul_http_port != nullptr) brpc::policy::FLAGS_consul_agent_addr = "http://" + createHostPortString(consul_http_host, consul_http_port); - Logger * log = &logger(); + auto log = getLogger("DM"); LOG_INFO(log, "Daemon Manager start up..."); /** Context contains all that query execution is dependent: @@ -387,7 +385,7 @@ int DaemonManager::main(const std::vector &) } ); - auto fix_metadata_task = global_context->getSchedulePool().createTask("Fix catalog metadata", [& log, this] () { fixCatalogMetaData(global_context, log); }); + auto fix_metadata_task = global_context->getSchedulePool().createTask("Fix catalog metadata", [log, this] () { fixCatalogMetaData(global_context, log); }); fix_metadata_task->activateAndSchedule(); waitForTerminationRequest(); diff --git a/src/DaemonManager/DaemonManager.h b/src/DaemonManager/DaemonManager.h index fe2fb9c014..117d6c30d5 100644 --- a/src/DaemonManager/DaemonManager.h +++ b/src/DaemonManager/DaemonManager.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include diff --git a/src/DaemonManager/DaemonManagerServiceImpl.h b/src/DaemonManager/DaemonManagerServiceImpl.h index 24acdf517d..34ff4cc61e 100644 --- a/src/DaemonManager/DaemonManagerServiceImpl.h +++ b/src/DaemonManager/DaemonManagerServiceImpl.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -54,7 +55,7 @@ public: private: std::unordered_map daemon_jobs; - Poco::Logger * log = 
&Poco::Logger::get("DaemonManagerRPCService"); + LoggerPtr log = getLogger("DaemonManagerRPCService"); }; using DaemonManagerServicePtr = std::shared_ptr; diff --git a/src/DaemonManager/FixCatalogMetaDataTask.cpp b/src/DaemonManager/FixCatalogMetaDataTask.cpp index c320c2319c..65e4eecbaa 100644 --- a/src/DaemonManager/FixCatalogMetaDataTask.cpp +++ b/src/DaemonManager/FixCatalogMetaDataTask.cpp @@ -43,7 +43,7 @@ void createMissingUUIDDictionaryModel(ContextPtr context) } -void fixDictionary(Catalog::Catalog * catalog, Poco::Logger * log) +void fixDictionary(Catalog::Catalog * catalog, LoggerPtr log) { Catalog::Catalog::DataModelDictionaries all = catalog->getAllDictionaries(); std::for_each(all.begin(), all.end(), @@ -60,7 +60,7 @@ void fixDictionary(Catalog::Catalog * catalog, Poco::Logger * log) }); } -void fixCatalogMetaData(ContextPtr context, Poco::Logger * log) +void fixCatalogMetaData(ContextPtr context, LoggerPtr log) { LOG_INFO(log, "execute fixing Catalog Metadata task"); std::shared_ptr catalog = context->getCnchCatalog(); diff --git a/src/DaemonManager/FixCatalogMetaDataTask.h b/src/DaemonManager/FixCatalogMetaDataTask.h index a481f80ffb..403b0a7d15 100644 --- a/src/DaemonManager/FixCatalogMetaDataTask.h +++ b/src/DaemonManager/FixCatalogMetaDataTask.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -24,6 +25,6 @@ namespace DB namespace DaemonManager { UUID getUUIDFromCreateQuery(const DB::Protos::DataModelDictionary & d); -void fixCatalogMetaData(ContextPtr context, Poco::Logger * log); +void fixCatalogMetaData(ContextPtr context, LoggerPtr log); } } diff --git a/src/DaemonManager/TargetServerCalculator.cpp b/src/DaemonManager/TargetServerCalculator.cpp index 871310bb3e..b5633667c3 100644 --- a/src/DaemonManager/TargetServerCalculator.cpp +++ b/src/DaemonManager/TargetServerCalculator.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes namespace DaemonManager { -TargetServerCalculator::TargetServerCalculator(Context & context_, 
CnchBGThreadType type_, Poco::Logger * log_) +TargetServerCalculator::TargetServerCalculator(Context & context_, CnchBGThreadType type_, LoggerPtr log_) : type{type_}, context(context_), log{log_} {} diff --git a/src/DaemonManager/TargetServerCalculator.h b/src/DaemonManager/TargetServerCalculator.h index c36f8285d4..4f2778f8bb 100644 --- a/src/DaemonManager/TargetServerCalculator.h +++ b/src/DaemonManager/TargetServerCalculator.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -32,14 +33,14 @@ public: class TargetServerCalculator : public ITargetServerCalculator { public: - TargetServerCalculator(Context & context, CnchBGThreadType type, Poco::Logger * log); + TargetServerCalculator(Context & context, CnchBGThreadType type, LoggerPtr log); CnchServerClientPtr getTargetServer(const StorageID &, UInt64) const override; private: CnchServerClientPtr getTargetServerForCnchMergeTree(const StorageID &, UInt64) const; CnchServerClientPtr getTargetServerForCnchKafka(const StorageID &, UInt64) const; const CnchBGThreadType type; Context & context; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/DaemonManager/tests/gtest_bg_job_status_in_catalog.cpp b/src/DaemonManager/tests/gtest_bg_job_status_in_catalog.cpp index 29ef6b78cd..d077264f64 100644 --- a/src/DaemonManager/tests/gtest_bg_job_status_in_catalog.cpp +++ b/src/DaemonManager/tests/gtest_bg_job_status_in_catalog.cpp @@ -72,7 +72,7 @@ public: TEST(BGJobStatusInCatalogTest, test_CatalogBGJobStatusPersistentStoreProxy) { - DummyProxy proxy{nullptr, CnchBGThreadType::MergeMutate, &Poco::Logger::get("test_CatalogBGJobStatusPersistentStoreProxy")}; + DummyProxy proxy{nullptr, CnchBGThreadType::MergeMutate, getLogger("test_CatalogBGJobStatusPersistentStoreProxy")}; UUID uuid{UInt128{0, 1}}; EXPECT_THROW(proxy.getStatus(uuid, false), Exception); EXPECT_THROW(proxy.getStatus(uuid, true), Exception); diff --git a/src/DaemonManager/tests/gtest_daemon_job.cpp 
b/src/DaemonManager/tests/gtest_daemon_job.cpp index 558191af3a..2d961ab735 100644 --- a/src/DaemonManager/tests/gtest_daemon_job.cpp +++ b/src/DaemonManager/tests/gtest_daemon_job.cpp @@ -395,7 +395,7 @@ TEST(daemon_job, checkIfServerDiedFalseCase) TEST(daemon_job, checkLivenessIfNeed_counter_test_checking_at_right_time) { auto fetch_bg_thread_from_server = [] - (Context &, CnchBGThreadType, Poco::Logger *, const std::vector &) + (Context &, CnchBGThreadType, LoggerPtr, const std::vector &) -> std::optional> { return std::unordered_multimap{}; @@ -422,7 +422,7 @@ TEST(daemon_job, checkLivenessIfNeed_counter_test_checking_at_right_time) TEST(daemon_job, checkLivenessIfNeed_counter_test_not_checking_when_not_in_time) { auto fetch_bg_thread_from_server = [] - (Context &, CnchBGThreadType, Poco::Logger *, const std::vector &) + (Context &, CnchBGThreadType, LoggerPtr, const std::vector &) -> std::optional> { return std::unordered_multimap{}; @@ -449,7 +449,7 @@ TEST(daemon_job, checkLivenessIfNeed_counter_test_not_checking_when_not_in_time) TEST(daemon_job, checkLivenessIfNeed_counter_test_fetch_from_server_failed) { auto fetch_bg_thread_from_server = [] - (Context &, CnchBGThreadType, Poco::Logger *, const std::vector &) + (Context &, CnchBGThreadType, LoggerPtr, const std::vector &) -> std::optional> { return {}; @@ -684,7 +684,7 @@ TEST(daemon_job, runMissingAndRemoveDuplicateJob_correct_case3) TEST(daemon_job, checkLiveness_run_missing) { auto fetch_bg_thread_from_server = [] - (Context &, CnchBGThreadType, Poco::Logger *, const std::vector &) + (Context &, CnchBGThreadType, LoggerPtr, const std::vector &) -> std::optional> { return std::unordered_multimap{ diff --git a/src/DaemonManager/tests/gtest_daemon_job_global_gc.cpp b/src/DaemonManager/tests/gtest_daemon_job_global_gc.cpp index 9c6d4d10ff..3a69ffdb8f 100644 --- a/src/DaemonManager/tests/gtest_daemon_job_global_gc.cpp +++ b/src/DaemonManager/tests/gtest_daemon_job_global_gc.cpp @@ -84,7 +84,7 @@ std::vector 
createTables(unsigned int begin, unsigned in TEST(DaemonJobGlobalGC, sendToServerForGC_test) { using DB::DaemonManager::GlobalGCHelpers::sendToServerForGC; - Poco::Logger * log = &Poco::Logger::get("test_log"); + LoggerPtr log = getLogger("test_log"); std::vector hps { {"127.0.0.1", 1606, 1224, 1225}, {"127.0.0.2", 1606, 1224, 1225}, diff --git a/src/DataStreams/ColumnGathererStream.cpp b/src/DataStreams/ColumnGathererStream.cpp index adefe90c8a..71cb901f84 100644 --- a/src/DataStreams/ColumnGathererStream.cpp +++ b/src/DataStreams/ColumnGathererStream.cpp @@ -53,7 +53,7 @@ ColumnGathererStream::ColumnGathererStream( , low_cardinality_fallback_threshold(fallback_threshold_) , block_preferred_size_rows(block_preferred_size_rows_) , block_preferred_size_bytes(block_preferred_size_bytes_) - , log(&Poco::Logger::get("ColumnGathererStream")) + , log(getLogger("ColumnGathererStream")) { if (source_streams.empty()) throw Exception("There are no streams to gather", ErrorCodes::EMPTY_DATA_PASSED); diff --git a/src/DataStreams/ColumnGathererStream.h b/src/DataStreams/ColumnGathererStream.h index dc72733649..f2ae6cba69 100644 --- a/src/DataStreams/ColumnGathererStream.h +++ b/src/DataStreams/ColumnGathererStream.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -155,7 +156,7 @@ private: Block output_block; MutableColumnPtr cardinality_dict = nullptr; - Poco::Logger * log; + LoggerPtr log; }; template diff --git a/src/DataStreams/MergingSortedBlockInputStream.cpp b/src/DataStreams/MergingSortedBlockInputStream.cpp index b7396a23d6..f1e8c431ac 100644 --- a/src/DataStreams/MergingSortedBlockInputStream.cpp +++ b/src/DataStreams/MergingSortedBlockInputStream.cpp @@ -21,7 +21,7 @@ MergingSortedBlockInputStream::MergingSortedBlockInputStream( : description(std::move(description_)), max_block_size(max_block_size_), limit(limit_), quiet(quiet_) , source_blocks(inputs_.size()) , cursors(inputs_.size()), out_row_sources_buf(out_row_sources_buf_) - , 
log(&Poco::Logger::get("MergingSortedBlockInputStream")) + , log(getLogger("MergingSortedBlockInputStream")) { children.insert(children.end(), inputs_.begin(), inputs_.end()); header = children.at(0)->getHeader(); diff --git a/src/DataStreams/MergingSortedBlockInputStream.h b/src/DataStreams/MergingSortedBlockInputStream.h index 582b41ff3a..ce26ed1f2c 100644 --- a/src/DataStreams/MergingSortedBlockInputStream.h +++ b/src/DataStreams/MergingSortedBlockInputStream.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -78,7 +79,7 @@ private: template void merge(MutableColumns & merged_columns, TSortingHeap & queue); - Poco::Logger * log; + LoggerPtr log; /// Read is finished. bool finished = false; diff --git a/src/DataStreams/OwningBlockInputStream.h b/src/DataStreams/OwningBlockInputStream.h index 804dc9fefd..a374b04bcf 100644 --- a/src/DataStreams/OwningBlockInputStream.h +++ b/src/DataStreams/OwningBlockInputStream.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -27,7 +28,7 @@ public: { children.clear(); if (stream.use_count() > 1) - LOG_WARNING(&Poco::Logger::get("OwningBlockInputStream"), "The BlockInputStream might outlive the buffer!"); + LOG_WARNING(getLogger("OwningBlockInputStream"), "The BlockInputStream might outlive the buffer!"); if (stream) stream.reset(); if (own) diff --git a/src/DataStreams/ParallelInputsProcessor.h b/src/DataStreams/ParallelInputsProcessor.h index 6633acf9d3..5932de0a0c 100644 --- a/src/DataStreams/ParallelInputsProcessor.h +++ b/src/DataStreams/ParallelInputsProcessor.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -365,7 +366,7 @@ private: /// Wait for the completion of all threads. 
std::atomic joined_threads { false }; - Poco::Logger * log = &Poco::Logger::get("ParallelInputsProcessor"); + LoggerPtr log = getLogger("ParallelInputsProcessor"); }; diff --git a/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/src/DataStreams/PushingToViewsBlockOutputStream.cpp index 2868691fa8..e99fd56501 100644 --- a/src/DataStreams/PushingToViewsBlockOutputStream.cpp +++ b/src/DataStreams/PushingToViewsBlockOutputStream.cpp @@ -59,7 +59,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( : WithContext(context_) , storage(storage_) , metadata_snapshot(metadata_snapshot_) - , log(&Poco::Logger::get("PushingToViewsBlockOutputStream")) + , log(getLogger("PushingToViewsBlockOutputStream")) , query_ptr(query_ptr_) { checkStackSize(); diff --git a/src/DataStreams/PushingToViewsBlockOutputStream.h b/src/DataStreams/PushingToViewsBlockOutputStream.h index 8f44076cb7..e327acaae1 100644 --- a/src/DataStreams/PushingToViewsBlockOutputStream.h +++ b/src/DataStreams/PushingToViewsBlockOutputStream.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -60,7 +61,7 @@ private: StorageMetadataPtr metadata_snapshot; BlockOutputStreamPtr output; ReplicatedMergeTreeBlockOutputStream * replicated_output = nullptr; - Poco::Logger * log; + LoggerPtr log; ASTPtr query_ptr; Stopwatch main_watch; diff --git a/src/DataStreams/RemoteBlockInputStream.h b/src/DataStreams/RemoteBlockInputStream.h index 82d139714b..2ca2907cf1 100644 --- a/src/DataStreams/RemoteBlockInputStream.h +++ b/src/DataStreams/RemoteBlockInputStream.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -97,7 +98,7 @@ protected: private: RemoteQueryExecutor query_executor; - Poco::Logger * log = &Poco::Logger::get("RemoteBlockInputStream"); + LoggerPtr log = getLogger("RemoteBlockInputStream"); void init(); }; diff --git a/src/DataStreams/RemoteQueryExecutor.h b/src/DataStreams/RemoteQueryExecutor.h index a50f83dce7..834b81a765 100644 --- 
a/src/DataStreams/RemoteQueryExecutor.h +++ b/src/DataStreams/RemoteQueryExecutor.h @@ -21,6 +21,8 @@ #pragma once +#include +#include #include #include #include @@ -29,7 +31,6 @@ #include #include #include -#include namespace DB { @@ -93,7 +94,7 @@ public: /// Query is resent to a replica, the query itself can be modified. std::atomic resent_query { false }; - + /// Read next block of data. Returns empty block if query is finished. Block read(); @@ -128,7 +129,7 @@ public: void setMainTable(StorageID main_table_) { main_table = std::move(main_table_); } - void setLogger(Poco::Logger * logger) { log = logger; } + void setLogger(LoggerPtr logger) { log = logger; } const Block & getHeader() const { return header; } @@ -211,7 +212,7 @@ private: bool is_server_forwarding {false}; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; /// Send all scalars to remote servers void sendScalars(); diff --git a/src/DataStreams/TTLBlockInputStream.cpp b/src/DataStreams/TTLBlockInputStream.cpp index d33cd71f53..c31edae0ca 100644 --- a/src/DataStreams/TTLBlockInputStream.cpp +++ b/src/DataStreams/TTLBlockInputStream.cpp @@ -45,7 +45,7 @@ TTLBlockInputStream::TTLBlockInputStream( time_t current_time_, bool force_) : data_part(data_part_) - , log(&Poco::Logger::get(storage_.getLogName() + " (TTLBlockInputStream)")) + , log(getLogger(storage_.getLogName() + " (TTLBlockInputStream)")) { children.push_back(input_); header = children.at(0)->getHeader(); diff --git a/src/DataStreams/TTLBlockInputStream.h b/src/DataStreams/TTLBlockInputStream.h index 311a46d0c9..3175c73e72 100644 --- a/src/DataStreams/TTLBlockInputStream.h +++ b/src/DataStreams/TTLBlockInputStream.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include #include #include @@ -61,7 +62,7 @@ private: /// ttl_infos and empty_columns are updating while reading const MergeTreeMetaBase::MutableDataPartPtr & data_part; - Poco::Logger * log; + LoggerPtr log; Block header; }; diff --git a/src/DataStreams/UnionBlockInputStream.h 
b/src/DataStreams/UnionBlockInputStream.h index e249938187..d354045b8b 100644 --- a/src/DataStreams/UnionBlockInputStream.h +++ b/src/DataStreams/UnionBlockInputStream.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -276,7 +277,7 @@ private: bool started = false; bool all_read = false; - Poco::Logger * log = &Poco::Logger::get("UnionBlockInputStream"); + LoggerPtr log = getLogger("UnionBlockInputStream"); }; } diff --git a/src/DataTypes/Serializations/SerializationMap.cpp b/src/DataTypes/Serializations/SerializationMap.cpp index 855f03fc25..4ebc4590aa 100644 --- a/src/DataTypes/Serializations/SerializationMap.cpp +++ b/src/DataTypes/Serializations/SerializationMap.cpp @@ -177,7 +177,7 @@ void SerializationMap::deserializeTextImpl(IColumn & column, ReadBuffer & istr, unlikely(current_key.size > settings.map.max_map_key_length)) { LOG_WARNING( - &Poco::Logger::get("SerializationMap"), + getLogger("SerializationMap"), "Key of map can not be longer than {}, discard key: {}", settings.map.max_map_key_length, current_key.toString()); diff --git a/src/Databases/DataLakes/DatabaseHive.h b/src/Databases/DataLakes/DatabaseHive.h index 0eba122e9a..5d9304ae33 100644 --- a/src/Databases/DataLakes/DatabaseHive.h +++ b/src/Databases/DataLakes/DatabaseHive.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_HIVE @@ -48,7 +49,7 @@ private: String database_name_in_hive; HiveMetastoreClientPtr metastore_client; - Poco::Logger * log{&Poco::Logger::get("DatabaseHive")}; + LoggerPtr log{getLogger("DatabaseHive")}; }; } diff --git a/src/Databases/DataLakes/DatabaseLakeBase.h b/src/Databases/DataLakes/DatabaseLakeBase.h index dd836d362f..a4b1a7e21c 100644 --- a/src/Databases/DataLakes/DatabaseLakeBase.h +++ b/src/Databases/DataLakes/DatabaseLakeBase.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -77,7 +78,7 @@ protected: CnchHiveSettingsPtr storage_settings; private: - Poco::Logger * log{&Poco::Logger::get("DatabaseLakeBase")}; + LoggerPtr 
log{getLogger("DatabaseLakeBase")}; }; class LakeDatabaseTablesIterator : public IDatabaseTablesIterator diff --git a/src/Databases/DataLakes/DatabasePaimon.h b/src/Databases/DataLakes/DatabasePaimon.h index 85b44feafa..39f433ad87 100644 --- a/src/Databases/DataLakes/DatabasePaimon.h +++ b/src/Databases/DataLakes/DatabasePaimon.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_HIVE and USE_JAVA_EXTENSIONS @@ -36,7 +37,7 @@ private: PaimonCatalogClientPtr catalog_client; - Poco::Logger * log{&Poco::Logger::get("DatabasePaimon")}; + LoggerPtr log{getLogger("DatabasePaimon")}; }; } diff --git a/src/Databases/DatabaseCnch.cpp b/src/Databases/DatabaseCnch.cpp index 4ad90d9a52..5a9db0a644 100644 --- a/src/Databases/DatabaseCnch.cpp +++ b/src/Databases/DatabaseCnch.cpp @@ -205,7 +205,7 @@ public: DatabaseCnch::DatabaseCnch(const String & name_, UUID uuid, ContextPtr local_context) : IDatabase(name_) , WithContext(local_context->getGlobalContext()) - , log(&Poco::Logger::get("DatabaseCnch (" + name_ + ")")) + , log(getLogger("DatabaseCnch (" + name_ + ")")) , db_uuid(uuid) { LOG_DEBUG(log, "Create database {} in query {}", database_name, local_context->getCurrentQueryId()); @@ -214,7 +214,7 @@ DatabaseCnch::DatabaseCnch(const String & name_, UUID uuid, ContextPtr local_con DatabaseCnch::DatabaseCnch(const String & name_, UUID uuid, const String & logger, ContextPtr local_context) : IDatabase(name_) , WithContext(local_context->getGlobalContext()) - , log(&Poco::Logger::get(logger)) + , log(getLogger(logger)) , db_uuid(uuid) { LOG_DEBUG(log, "Create database {} in query {}", database_name, local_context->getCurrentQueryId()); diff --git a/src/Databases/DatabaseCnch.h b/src/Databases/DatabaseCnch.h index abf542658d..d83373e75a 100644 --- a/src/Databases/DatabaseCnch.h +++ b/src/Databases/DatabaseCnch.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -98,7 +99,7 @@ protected: ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr 
local_context, bool throw_on_error) const override; StoragePtr tryGetTableImpl(const String & name, ContextPtr local_context) const; - Poco::Logger * log; + LoggerPtr log; private: const UUID db_uuid; /// local storage cache, mapping from name->storage, mainly for select query diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index 97441969d7..9f95cdef91 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -69,7 +69,7 @@ namespace DatabaseDictionary::DatabaseDictionary(const String & name_, ContextPtr context_) : IDatabase(name_), WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("DatabaseDictionary(" + database_name + ")")) + , log(getLogger("DatabaseDictionary(" + database_name + ")")) { } diff --git a/src/Databases/DatabaseDictionary.h b/src/Databases/DatabaseDictionary.h index 06402a9638..c112e2fad0 100644 --- a/src/Databases/DatabaseDictionary.h +++ b/src/Databases/DatabaseDictionary.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -48,7 +49,7 @@ protected: ASTPtr getCreateTableQueryImpl(const String & table_name, ContextPtr context, bool throw_on_error) const override; private: - Poco::Logger * log; + LoggerPtr log; Tables listTables(const FilterByNameFunction & filter_by_name); }; diff --git a/src/Databases/DatabaseExternalHive.cpp b/src/Databases/DatabaseExternalHive.cpp index 007b115286..fce740c91d 100644 --- a/src/Databases/DatabaseExternalHive.cpp +++ b/src/Databases/DatabaseExternalHive.cpp @@ -30,7 +30,7 @@ DatabaseExternalHive::DatabaseExternalHive(const String & catalog_, const String : IDatabase(name_) , WithContext(context_->getGlobalContext()) , hive_catalog_name(catalog_) - , log(&Poco::Logger::get("DatabaseExternalHive(" + database_name + ")")) + , log(getLogger("DatabaseExternalHive(" + database_name + ")")) { // std::optional hive_catalog_opt; // std::optional hive_db_opt; diff --git a/src/Databases/DatabaseExternalHive.h 
b/src/Databases/DatabaseExternalHive.h index 468bb4a7f0..125c02d711 100644 --- a/src/Databases/DatabaseExternalHive.h +++ b/src/Databases/DatabaseExternalHive.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -49,7 +50,7 @@ protected: private: String hive_catalog_name; ExternalCatalog::ExternalCatalogPtr hive_catalog; - Poco::Logger * log; + LoggerPtr log; mutable std::unordered_map> cache; mutable std::shared_mutex cache_mutex; }; diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index aecad80a1a..2f338e2aaa 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -697,7 +697,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat } ASTPtr DatabaseOnDisk::parseQueryFromMetadata( - Poco::Logger * logger, + LoggerPtr logger, ContextPtr local_context, const String & metadata_file_path, bool throw_on_error /*= true*/, diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 10d3dabf78..64c5d9e66c 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -90,7 +91,7 @@ public: String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); } String getMetadataPath() const override { return metadata_path; } - static ASTPtr parseQueryFromMetadata(Poco::Logger * log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false); + static ASTPtr parseQueryFromMetadata(LoggerPtr log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false); /// will throw when the table we want to attach already exists (in active / detached / detached permanently form) void checkMetadataFilenameAvailability(const String & to_table_name) const; diff --git a/src/Databases/DatabaseOrdinary.cpp 
b/src/Databases/DatabaseOrdinary.cpp index ed2947bd50..5fe2275669 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -82,7 +82,7 @@ namespace } } - void logAboutProgress(Poco::Logger * log, size_t processed, size_t total, AtomicStopwatch & watch) + void logAboutProgress(LoggerPtr log, size_t processed, size_t total, AtomicStopwatch & watch) { if (processed % PRINT_MESSAGE_EACH_N_OBJECTS == 0 || watch.compareAndRestart(PRINT_MESSAGE_EACH_N_SECONDS)) { diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 20c8680d08..a520f144b8 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -130,7 +130,7 @@ ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_ } DatabaseWithOwnTablesBase::DatabaseWithOwnTablesBase(const String & name_, const String & logger, ContextPtr context_) - : IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get(logger)) + : IDatabase(name_), WithContext(context_->getGlobalContext()), log(getLogger(logger)) { } diff --git a/src/Databases/DatabasesCommon.h b/src/Databases/DatabasesCommon.h index 5ecc39cafb..ccce961dda 100644 --- a/src/Databases/DatabasesCommon.h +++ b/src/Databases/DatabasesCommon.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -66,7 +67,7 @@ public: protected: Tables tables; - Poco::Logger * log; + LoggerPtr log; /// Information to log broken parts which fails to be loaded std::map brokenTables; diff --git a/src/Databases/MySQL/DatabaseCloudMaterializedMySQL.cpp b/src/Databases/MySQL/DatabaseCloudMaterializedMySQL.cpp index c6d809e7b0..dfdf8f1f2b 100644 --- a/src/Databases/MySQL/DatabaseCloudMaterializedMySQL.cpp +++ b/src/Databases/MySQL/DatabaseCloudMaterializedMySQL.cpp @@ -142,13 +142,13 @@ static void executeCreateSqlForSyncThread(const std::vector & create_com ParserCreateQuery parser; for (const auto & cmd : create_commands) { - 
LOG_DEBUG(&Poco::Logger::get("MySQLSyncThreadTask"), "Try to execute CREATE query: {}", cmd); + LOG_DEBUG(getLogger("MySQLSyncThreadTask"), "Try to execute CREATE query: {}", cmd); ASTPtr ast = parseQuery(parser, cmd, "", 0, create_context->getSettingsRef().max_parser_depth); InterpreterCreateQuery interpreter_tb(ast, create_context); interpreter_tb.execute(); } - LOG_DEBUG(&Poco::Logger::get("MySQLSyncThreadTask"), "Executed CREATE query successfully"); + LOG_DEBUG(getLogger("MySQLSyncThreadTask"), "Executed CREATE query successfully"); } void executeSyncThreadTaskCommandImpl(const MySQLSyncThreadCommand & command, ContextMutablePtr context) @@ -194,13 +194,13 @@ void executeSyncThreadTaskCommandImpl(const MySQLSyncThreadCommand & command, Co for (const auto & drop_command : drop_commands) { - LOG_DEBUG(&Poco::Logger::get("ExecuteSyncThreadCommand"), "Try to Execute DROP sql: {}", drop_command); + LOG_DEBUG(getLogger("ExecuteSyncThreadCommand"), "Try to Execute DROP sql: {}", drop_command); try { InterpreterDropQuery interpreter(parseQuery(parser, drop_command, "", 0, 0), context); interpreter.execute(); - LOG_DEBUG(&Poco::Logger::get("ExecuteSyncThreadCommand"), "Successfully executed DROP sql: {}", drop_command); + LOG_DEBUG(getLogger("ExecuteSyncThreadCommand"), "Successfully executed DROP sql: {}", drop_command); } catch (...) 
{ diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp index bb70c62086..f1a90bd3a0 100644 --- a/src/Databases/MySQL/DatabaseMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMySQL.cpp @@ -301,7 +301,7 @@ std::map DatabaseMySQL::fetchTablesWithModificationTime(ContextP /// Handle exception when fetchTablesWithModificationTime for Database MySQL, currently known code: /// POCO_EXCEPTION: for connection error tryLogCurrentException( - &Poco::Logger::get("DatabaseMySQL(" + database_name + ")"), + getLogger("DatabaseMySQL(" + database_name + ")"), "Call to fetchTablesWithModificationTime wasn't finished successfully"); if (local_context->getSettingsRef().throw_exception_when_mysql_connection_failed) throw; diff --git a/src/Databases/MySQL/MaterializeMetadata.cpp b/src/Databases/MySQL/MaterializeMetadata.cpp index cfd4404fd0..c2515ee1a1 100644 --- a/src/Databases/MySQL/MaterializeMetadata.cpp +++ b/src/Databases/MySQL/MaterializeMetadata.cpp @@ -35,7 +35,7 @@ static std::unordered_map fetchTablesCreateQuery( { if (!materialized_tables_list.contains(fetch_table_name)) { - LOG_INFO(&Poco::Logger::get("fetchTablesCreateQuery"), "Skip table " + fetch_table_name + " as it is not in materialized_table_list"); + LOG_INFO(getLogger("fetchTablesCreateQuery"), "Skip table " + fetch_table_name + " as it is not in materialized_table_list"); continue; } @@ -76,7 +76,7 @@ std::vector MaterializeMetadata::fetchTablesInDB(const mysqlxx::PoolWith } for (const auto & table : tables_in_db) - LOG_INFO(&Poco::Logger::get("fetchTablesInDB"), "Fetched table from MySQL : " + database + "." + table); + LOG_INFO(getLogger("fetchTablesInDB"), "Fetched table from MySQL : " + database + "." + table); return tables_in_db; } @@ -298,7 +298,7 @@ void MaterializeMetadata::getTablesWithCreateQueryFromMySql(mysqlxx::PoolWithFai } catch (...) 
{ - tryLogCurrentException(&Poco::Logger::get("GetTablesFromMysql"), + tryLogCurrentException(getLogger("GetTablesFromMysql"), "Failed to Unlock tables while getting tables list from mysql " + database); } }); diff --git a/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp index 2a33f3005a..b8c4dea319 100644 --- a/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp @@ -54,7 +54,7 @@ static BlockIO tryToExecuteQuery(const String & query_to_execute, ContextMutable catch (...) { tryLogCurrentException( - &Poco::Logger::get("MaterializeMySQLSyncThread(" + database + ")"), + getLogger("MaterializeMySQLSyncThread(" + database + ")"), "Query " + query_to_execute + " wasn't finished successfully"); throw; } @@ -89,7 +89,7 @@ MaterializeMySQLSyncThread::MaterializeMySQLSyncThread( MaterializeMySQLSettings * settings_, const String & assigned_table) : WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("MaterializedMySQLSyncThread (" + database_name_ + ") ")) + , log(getLogger("MaterializedMySQLSyncThread (" + database_name_ + ") ")) , database_name(database_name_) , mysql_database_name(mysql_database_name_) , assigned_materialized_table(assigned_table) @@ -765,7 +765,7 @@ void MaterializeMySQLSyncThread::Buffers::prepareCommit(ContextMutablePtr query_ if (!server_client) throw Exception("Failed to get ServerClient", ErrorCodes::LOGICAL_ERROR); - LOG_TRACE(&Poco::Logger::get("MaterializedMySQLSyncThread"), "Try to commit buffer data to server: {}", server_client->getRPCAddress()); + LOG_TRACE(getLogger("MaterializedMySQLSyncThread"), "Try to commit buffer data to server: {}", server_client->getRPCAddress()); /// set rpc info for committing later auto & client_info = query_context->getClientInfo(); @@ -801,7 +801,7 @@ void MaterializeMySQLSyncThread::Buffers::commit(ContextPtr context, const MySQL BlockOutputStreamPtr out = getTableOutput(database, 
table_name_and_buffer.first + "_" + table_suffix, query_context, true); Stopwatch watch; copyData(input, *out); - LOG_DEBUG(&Poco::Logger::get("MaterializeMySQLThread ({})"), "Copied {} rows and elapsed {} ms", + LOG_DEBUG(getLogger("MaterializeMySQLThread ({})"), "Copied {} rows and elapsed {} ms", table_name_and_buffer.first, table_name_and_buffer.second->first.rows(), watch.elapsedMilliseconds()); } diff --git a/src/Databases/MySQL/MaterializeMySQLSyncThread.h b/src/Databases/MySQL/MaterializeMySQLSyncThread.h index 953296f0a0..9e3a92e4cb 100644 --- a/src/Databases/MySQL/MaterializeMySQLSyncThread.h +++ b/src/Databases/MySQL/MaterializeMySQLSyncThread.h @@ -7,6 +7,7 @@ #if USE_MYSQL # include +# include # include # include # include @@ -62,7 +63,7 @@ public: void setBinLogInfo(const MySQLBinLogInfo & binlog) { binlog_info = binlog; } private: - Poco::Logger * log; + LoggerPtr log; String database_name; String mysql_database_name; diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThreadManager.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThreadManager.cpp index 9811d5321a..a48fb57dc5 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThreadManager.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThreadManager.cpp @@ -62,7 +62,7 @@ static bool tryToExecuteQueryWithForwarding(const String & query_to_execute, Con catch (...) { tryLogCurrentException( - &Poco::Logger::get("MaterializeMySQLManager(" + database + ")"), + getLogger("MaterializeMySQLManager(" + database + ")"), "Query " + query_to_execute + " wasn't finished successfully"); throw; } @@ -81,7 +81,7 @@ static BlockIO tryToExecuteQuery(const String & query_to_execute, ContextMutable catch (...) 
{ tryLogCurrentException( - &Poco::Logger::get("MaterializeMySQLManager(" + database + ")"), + getLogger("MaterializeMySQLManager(" + database + ")"), "Query " + query_to_execute + " wasn't finished successfully"); throw; } @@ -207,7 +207,7 @@ static inline void dumpDataForTables( auto iterator = need_dumping_tables.begin(); for (; iterator != need_dumping_tables.end() /*&& !is_cancelled()*/; ++iterator) { - LOG_DEBUG(&Poco::Logger::get("DumpDataForMySQLTable"), "Try to create and dump data for table {} with sql: {}", iterator->first, iterator->second); + LOG_DEBUG(getLogger("DumpDataForMySQLTable"), "Try to create and dump data for table {} with sql: {}", iterator->first, iterator->second); try { /// 1. create table @@ -233,7 +233,7 @@ static inline void dumpDataForTables( Stopwatch watch; copyData(input, *out); const Progress & progress = out->getProgress(); - LOG_INFO(&Poco::Logger::get("MaterializeMySQLSyncThread(" + database_name + ")"), + LOG_INFO(getLogger("MaterializeMySQLSyncThread(" + database_name + ")"), "Materialize MySQL step 1: dump {}, {} rows, {} in {} sec., {} rows/sec., {}/sec." 
, table_name, formatReadableQuantity(progress.written_rows), formatReadableSizeWithBinarySuffix(progress.written_bytes) , watch.elapsedSeconds(), formatReadableQuantity(static_cast(progress.written_rows / watch.elapsedSeconds())) @@ -245,7 +245,7 @@ static inline void dumpDataForTables( if (exception.code() == ErrorCodes::UNSUPPORTED_MYSQL_TABLE || exception.code() == ErrorCodes::NOT_IMPLEMENTED || exception.code() == ErrorCodes::SYNTAX_ERROR || exception.code() == ErrorCodes::ILLEGAL_COLUMN) { LOG_WARNING( - &Poco::Logger::get("MaterializeMySQLManager(" + database_name + ")"), + getLogger("MaterializeMySQLManager(" + database_name + ")"), "Skip unsupported MySQL table while trying to execute: [" + iterator->second + "]; " + "due to: " + getCurrentExceptionMessage(true)); if (!unsupported_tables_local.count(iterator->first)) @@ -1523,7 +1523,7 @@ void MaterializedMySQLSyncThreadManager::doResyncTable(const String & table_name return resync_tables.count(table_name) == 0 || isResyncTableCancelled(); }); const Progress & progress = out->getProgress(); - LOG_INFO(&Poco::Logger::get("MaterializeMySQLSyncThread(" + storage_id.database_name + ")"), + LOG_INFO(getLogger("MaterializeMySQLSyncThread(" + storage_id.database_name + ")"), "Materialize MySQL step 1: dump {}, {} rows, {} in {} sec., {} rows/sec., {}/sec." 
, table_name, formatReadableQuantity(progress.written_rows), formatReadableSizeWithBinarySuffix(progress.written_bytes) , watch.elapsedSeconds(), formatReadableQuantity(static_cast(progress.written_rows / watch.elapsedSeconds())) diff --git a/src/Databases/TablesDependencyGraph.cpp b/src/Databases/TablesDependencyGraph.cpp index 854328c05f..c687f80adc 100644 --- a/src/Databases/TablesDependencyGraph.cpp +++ b/src/Databases/TablesDependencyGraph.cpp @@ -736,10 +736,10 @@ void TablesDependencyGraph::log() const } -Poco::Logger * TablesDependencyGraph::getLogger() const +LoggerPtr TablesDependencyGraph::getLogger() const { if (!logger) - logger = &Poco::Logger::get(name_for_logging); + logger = ::getLogger(name_for_logging); return logger; } diff --git a/src/Databases/TablesDependencyGraph.h b/src/Databases/TablesDependencyGraph.h index 076442190f..b31e9bb561 100644 --- a/src/Databases/TablesDependencyGraph.h +++ b/src/Databases/TablesDependencyGraph.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -166,7 +167,7 @@ private: mutable bool levels_calculated = false; const String name_for_logging; - mutable Poco::Logger * logger = nullptr; + mutable LoggerPtr logger = nullptr; Node * findNode(const StorageID & table_id) const; Node * addOrUpdateNode(const StorageID & table_id); @@ -178,7 +179,7 @@ private: void setNeedRecalculateLevels() const; const NodesSortedByLevel & getNodesSortedByLevel() const; - Poco::Logger * getLogger() const; + LoggerPtr getLogger() const; }; } diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index 8a8a64fab3..a8dc9ee4f0 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -61,7 +61,7 @@ CacheDictionary::CacheDictionary( update(unit_to_update); }) , dict_lifetime(dict_lifetime_) - , log(&Poco::Logger::get("ExternalDictionaries")) + , log(getLogger("ExternalDictionaries")) , allow_read_expired_keys(allow_read_expired_keys_) , rnd_engine(randomSeed()) 
{ diff --git a/src/Dictionaries/CacheDictionary.h b/src/Dictionaries/CacheDictionary.h index baaf99d290..18d3b8ea04 100644 --- a/src/Dictionaries/CacheDictionary.h +++ b/src/Dictionaries/CacheDictionary.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -198,7 +199,7 @@ private: const DictionaryLifetime dict_lifetime; - Poco::Logger * log; + LoggerPtr log; const bool allow_read_expired_keys; diff --git a/src/Dictionaries/CassandraDictionarySource.cpp b/src/Dictionaries/CassandraDictionarySource.cpp index 7605b86ef9..adca85a2f4 100644 --- a/src/Dictionaries/CassandraDictionarySource.cpp +++ b/src/Dictionaries/CassandraDictionarySource.cpp @@ -99,7 +99,7 @@ CassandraDictionarySource::CassandraDictionarySource( const DictionaryStructure & dict_struct_, const CassandraSettings & settings_, const Block & sample_block_) - : log(&Poco::Logger::get("CassandraDictionarySource")) + : log(getLogger("CassandraDictionarySource")) , dict_struct(dict_struct_) , settings(settings_) , sample_block(sample_block_) diff --git a/src/Dictionaries/CassandraDictionarySource.h b/src/Dictionaries/CassandraDictionarySource.h index c0a4e774d2..5a3550893a 100644 --- a/src/Dictionaries/CassandraDictionarySource.h +++ b/src/Dictionaries/CassandraDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_CASSANDRA @@ -74,7 +75,7 @@ private: void maybeAllowFiltering(String & query) const; CassSessionShared getSession(); - Poco::Logger * log; + LoggerPtr log; const DictionaryStructure dict_struct; const CassandraSettings settings; Block sample_block; diff --git a/src/Dictionaries/CassandraHelpers.cpp b/src/Dictionaries/CassandraHelpers.cpp index 81f7d6d9a6..45dbc85b28 100644 --- a/src/Dictionaries/CassandraHelpers.cpp +++ b/src/Dictionaries/CassandraHelpers.cpp @@ -47,7 +47,7 @@ void setupCassandraDriverLibraryLogging(CassLogLevel level) { std::call_once(setup_logging_flag, [level]() { - Poco::Logger * logger = &Poco::Logger::get("CassandraDriverLibrary"); + 
LoggerRawPtr logger = getRawLogger("CassandraDriverLibrary"); cass_log_set_level(level); if (level != CASS_LOG_DISABLED) cass_log_set_callback(cassandraLogCallback, logger); @@ -56,7 +56,7 @@ void setupCassandraDriverLibraryLogging(CassLogLevel level) void cassandraLogCallback(const CassLogMessage * message, void * data) { - Poco::Logger * logger = static_cast(data); + LoggerRawPtr logger = static_cast(data); if (message->severity == CASS_LOG_CRITICAL || message->severity == CASS_LOG_ERROR) LOG_ERROR(logger, message->message); else if (message->severity == CASS_LOG_WARN) diff --git a/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h index 630f19b377..a857e68c1b 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.h +++ b/src/Dictionaries/ClickHouseDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -84,7 +85,7 @@ private: ContextMutablePtr context; ConnectionPoolWithFailoverPtr pool; const std::string load_all_query; - Poco::Logger * log = &Poco::Logger::get("ClickHouseDictionarySource"); + LoggerPtr log = getLogger("ClickHouseDictionarySource"); }; } diff --git a/src/Dictionaries/DictionaryFactory.cpp b/src/Dictionaries/DictionaryFactory.cpp index 62b28ed7d1..3d96772aa0 100644 --- a/src/Dictionaries/DictionaryFactory.cpp +++ b/src/Dictionaries/DictionaryFactory.cpp @@ -46,7 +46,7 @@ DictionaryPtr DictionaryFactory::create( DictionarySourcePtr source_ptr = DictionarySourceFactory::instance().create( name, config, config_prefix + ".source", dict_struct, context, config.getString(config_prefix + ".database", ""), created_from_ddl); - LOG_TRACE(&Poco::Logger::get("DictionaryFactory"), "Created dictionary source '{}' for dictionary '{}'", source_ptr->toString(), name); + LOG_TRACE(getLogger("DictionaryFactory"), "Created dictionary source '{}' for dictionary '{}'", source_ptr->toString(), name); if (context->hasQueryContext() && context->getSettingsRef().log_queries) 
context->getQueryContext()->addQueryFactoriesInfo(Context::QueryLogFactories::Dictionary, name); diff --git a/src/Dictionaries/DictionarySourceFactory.cpp b/src/Dictionaries/DictionarySourceFactory.cpp index 1992c87d31..199a8fd830 100644 --- a/src/Dictionaries/DictionarySourceFactory.cpp +++ b/src/Dictionaries/DictionarySourceFactory.cpp @@ -65,7 +65,7 @@ namespace } -DictionarySourceFactory::DictionarySourceFactory() : log(&Poco::Logger::get("DictionarySourceFactory")) +DictionarySourceFactory::DictionarySourceFactory() : log(getLogger("DictionarySourceFactory")) { } diff --git a/src/Dictionaries/DictionarySourceFactory.h b/src/Dictionaries/DictionarySourceFactory.h index bb583927ac..885fe70ad3 100644 --- a/src/Dictionaries/DictionarySourceFactory.h +++ b/src/Dictionaries/DictionarySourceFactory.h @@ -1,5 +1,6 @@ #pragma once +#include #include "IDictionarySource.h" #include #include @@ -56,7 +57,7 @@ private: using SourceRegistry = std::unordered_map; SourceRegistry registered_sources; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Dictionaries/Embedded/RegionsHierarchies.cpp b/src/Dictionaries/Embedded/RegionsHierarchies.cpp index 15e14db466..6d7398aab7 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchies.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchies.cpp @@ -6,7 +6,7 @@ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_provider) { - Poco::Logger * log = &Poco::Logger::get("RegionsHierarchies"); + LoggerPtr log = getLogger("RegionsHierarchies"); LOG_DEBUG(log, "Adding default regions hierarchy"); data.emplace("", data_provider->getDefaultHierarchySource()); diff --git a/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/src/Dictionaries/Embedded/RegionsHierarchy.cpp index 115ae30d93..f578e66e2c 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchy.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchy.cpp @@ -23,7 +23,7 @@ RegionsHierarchy::RegionsHierarchy(IRegionsHierarchyDataSourcePtr data_source_) void 
RegionsHierarchy::reload() { - Poco::Logger * log = &Poco::Logger::get("RegionsHierarchy"); + LoggerPtr log = getLogger("RegionsHierarchy"); if (!data_source->isModified()) return; diff --git a/src/Dictionaries/Embedded/RegionsNames.cpp b/src/Dictionaries/Embedded/RegionsNames.cpp index 30ba8259b3..ea23f995fb 100644 --- a/src/Dictionaries/Embedded/RegionsNames.cpp +++ b/src/Dictionaries/Embedded/RegionsNames.cpp @@ -42,7 +42,7 @@ std::string RegionsNames::dumpSupportedLanguagesNames() void RegionsNames::reload() { - Poco::Logger * log = &Poco::Logger::get("RegionsNames"); + LoggerPtr log = getLogger("RegionsNames"); LOG_DEBUG(log, "Reloading regions names"); RegionID max_region_id = 0; diff --git a/src/Dictionaries/ExecutableDictionarySource.cpp b/src/Dictionaries/ExecutableDictionarySource.cpp index b7a00c0886..7757a904b7 100644 --- a/src/Dictionaries/ExecutableDictionarySource.cpp +++ b/src/Dictionaries/ExecutableDictionarySource.cpp @@ -36,9 +36,9 @@ namespace class ShellCommandOwningBlockInputStream : public OwningBlockInputStream { private: - Poco::Logger * log; + LoggerPtr log; public: - ShellCommandOwningBlockInputStream(Poco::Logger * log_, const BlockInputStreamPtr & impl, std::unique_ptr command_) + ShellCommandOwningBlockInputStream(LoggerPtr log_, const BlockInputStreamPtr & impl, std::unique_ptr command_) : OwningBlockInputStream(std::move(impl), std::move(command_)), log(log_) { } @@ -63,7 +63,7 @@ ExecutableDictionarySource::ExecutableDictionarySource( const Configuration & configuration_, Block & sample_block_, ContextPtr context_) - : log(&Poco::Logger::get("ExecutableDictionarySource")) + : log(getLogger("ExecutableDictionarySource")) , dict_struct(dict_struct_) , configuration(configuration_) , sample_block{sample_block_} @@ -84,7 +84,7 @@ ExecutableDictionarySource::ExecutableDictionarySource( } ExecutableDictionarySource::ExecutableDictionarySource(const ExecutableDictionarySource & other) - : 
log(&Poco::Logger::get("ExecutableDictionarySource")) + : log(getLogger("ExecutableDictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) @@ -135,7 +135,7 @@ namespace const std::string & format, const Block & sample_block, const std::string & command_str, - Poco::Logger * log_, + LoggerPtr log_, std::function && send_data_) : log(log_), command(ShellCommand::execute(command_str)), @@ -184,7 +184,7 @@ namespace String getName() const override { return "WithBackgroundThread"; } - Poco::Logger * log; + LoggerPtr log; BlockInputStreamPtr stream; std::unique_ptr command; std::function send_data; diff --git a/src/Dictionaries/ExecutableDictionarySource.h b/src/Dictionaries/ExecutableDictionarySource.h index 0b92023df3..bf6807ec66 100644 --- a/src/Dictionaries/ExecutableDictionarySource.h +++ b/src/Dictionaries/ExecutableDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include "DictionaryStructure.h" #include "IDictionarySource.h" #include @@ -61,7 +62,7 @@ public: BlockInputStreamPtr getStreamForBlock(const Block & block); private: - Poco::Logger * log; + LoggerPtr log; time_t update_time = 0; const DictionaryStructure dict_struct; const Configuration configuration; diff --git a/src/Dictionaries/ExecutablePoolDictionarySource.cpp b/src/Dictionaries/ExecutablePoolDictionarySource.cpp index 08500165c9..0aad0c99f4 100644 --- a/src/Dictionaries/ExecutablePoolDictionarySource.cpp +++ b/src/Dictionaries/ExecutablePoolDictionarySource.cpp @@ -33,7 +33,7 @@ ExecutablePoolDictionarySource::ExecutablePoolDictionarySource( const Configuration & configuration_, Block & sample_block_, ContextPtr context_) - : log(&Poco::Logger::get("ExecutablePoolDictionarySource")) + : log(getLogger("ExecutablePoolDictionarySource")) , dict_struct{dict_struct_} , configuration{configuration_} , sample_block{sample_block_} @@ -58,7 +58,7 @@ ExecutablePoolDictionarySource::ExecutablePoolDictionarySource( } 
ExecutablePoolDictionarySource::ExecutablePoolDictionarySource(const ExecutablePoolDictionarySource & other) - : log(&Poco::Logger::get("ExecutablePoolDictionarySource")) + : log(getLogger("ExecutablePoolDictionarySource")) , update_time{other.update_time} , dict_struct{other.dict_struct} , configuration{other.configuration} @@ -91,7 +91,7 @@ namespace std::unique_ptr && command_, BlockInputStreamPtr && stream_, size_t read_rows_, - Poco::Logger * log_, + LoggerPtr log_, std::function && send_data_) : process_pool(process_pool_) , command(std::move(command_)) @@ -183,7 +183,7 @@ namespace std::unique_ptr command; BlockInputStreamPtr stream; size_t rows_to_read; - Poco::Logger * log; + LoggerPtr log; std::function send_data; ThreadFromGlobalPool thread; size_t current_read_rows = 0; diff --git a/src/Dictionaries/ExecutablePoolDictionarySource.h b/src/Dictionaries/ExecutablePoolDictionarySource.h index 02b0288a52..ffd28a4d30 100644 --- a/src/Dictionaries/ExecutablePoolDictionarySource.h +++ b/src/Dictionaries/ExecutablePoolDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -73,7 +74,7 @@ public: BlockInputStreamPtr getStreamForBlock(const Block & block); private: - Poco::Logger * log; + LoggerPtr log; time_t update_time = 0; const DictionaryStructure dict_struct; const Configuration configuration; diff --git a/src/Dictionaries/FileDictionarySource.cpp b/src/Dictionaries/FileDictionarySource.cpp index 239c13e71c..bcfe1ecb27 100644 --- a/src/Dictionaries/FileDictionarySource.cpp +++ b/src/Dictionaries/FileDictionarySource.cpp @@ -47,7 +47,7 @@ FileDictionarySource::FileDictionarySource(const FileDictionarySource & other) BlockInputStreamPtr FileDictionarySource::loadAll() { - LOG_TRACE(&Poco::Logger::get("FileDictionary"), "loadAll {}", toString()); + LOG_TRACE(getLogger("FileDictionary"), "loadAll {}", toString()); auto in_ptr = std::make_unique(filepath); auto stream = context->getInputFormat(format, *in_ptr, sample_block, max_block_size); 
last_modification = getLastModification(); diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index b1b1968454..d4a990fead 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -33,7 +33,7 @@ HTTPDictionarySource::HTTPDictionarySource( Block & sample_block_, ContextPtr context_, bool created_from_ddl) - : log(&Poco::Logger::get("HTTPDictionarySource")) + : log(getLogger("HTTPDictionarySource")) , update_time(std::chrono::system_clock::from_time_t(0)) , dict_struct(dict_struct_) , configuration(configuration_) @@ -49,7 +49,7 @@ HTTPDictionarySource::HTTPDictionarySource( } HTTPDictionarySource::HTTPDictionarySource(const HTTPDictionarySource & other) - : log(&Poco::Logger::get("HTTPDictionarySource")) + : log(getLogger("HTTPDictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) diff --git a/src/Dictionaries/HTTPDictionarySource.h b/src/Dictionaries/HTTPDictionarySource.h index ee393d62ae..1eec50543a 100644 --- a/src/Dictionaries/HTTPDictionarySource.h +++ b/src/Dictionaries/HTTPDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -67,7 +68,7 @@ private: // wrap buffer using encoding from made request BlockInputStreamPtr createWrappedBuffer(std::unique_ptr http_buffer); - Poco::Logger * log; + LoggerPtr log; LocalDateTime getLastModification() const; diff --git a/src/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp index d65338b9a4..80faa40885 100644 --- a/src/Dictionaries/HashedDictionary.cpp +++ b/src/Dictionaries/HashedDictionary.cpp @@ -575,7 +575,7 @@ void HashedDictionary::loadData() size_t current_new_size = new_size.exchange(0); if (current_new_size) { - LOG_TRACE(&Poco::Logger::get("HashedDictionary"), "Preallocated {} elements", current_new_size); + LOG_TRACE(getLogger("HashedDictionary"), "Preallocated {} elements", 
current_new_size); resize(current_new_size); } } diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index d42a6947d9..90c2b330cb 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -202,7 +202,7 @@ IPAddressDictionary::IPAddressDictionary( , dict_lifetime(dict_lifetime_) , require_nonempty(require_nonempty_) , access_to_key_from_attributes(dict_struct_.access_to_key_from_attributes) - , logger(&Poco::Logger::get("IPAddressDictionary")) + , logger(getLogger("IPAddressDictionary")) { createAttributes(); loadData(); diff --git a/src/Dictionaries/IPAddressDictionary.h b/src/Dictionaries/IPAddressDictionary.h index bc0dc9f99a..90b39124b9 100644 --- a/src/Dictionaries/IPAddressDictionary.h +++ b/src/Dictionaries/IPAddressDictionary.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -225,7 +226,7 @@ private: mutable std::atomic query_count{0}; mutable std::atomic found_count{0}; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp index d16a3dfb18..a1cac77d5e 100644 --- a/src/Dictionaries/LibraryDictionarySource.cpp +++ b/src/Dictionaries/LibraryDictionarySource.cpp @@ -30,7 +30,7 @@ LibraryDictionarySource::LibraryDictionarySource( Block & sample_block_, ContextPtr context_, bool created_from_ddl) - : log(&Poco::Logger::get("LibraryDictionarySource")) + : log(getLogger("LibraryDictionarySource")) , dict_struct{dict_struct_} , config_prefix{config_prefix_} , path{config.getString(config_prefix + ".path", "")} @@ -78,7 +78,7 @@ LibraryDictionarySource::~LibraryDictionarySource() LibraryDictionarySource::LibraryDictionarySource(const LibraryDictionarySource & other) - : log(&Poco::Logger::get("LibraryDictionarySource")) + : log(getLogger("LibraryDictionarySource")) , dict_struct{other.dict_struct} , config_prefix{other.config_prefix} , path{other.path} diff 
--git a/src/Dictionaries/LibraryDictionarySource.h b/src/Dictionaries/LibraryDictionarySource.h index 8daef43606..cf7d8fe161 100644 --- a/src/Dictionaries/LibraryDictionarySource.h +++ b/src/Dictionaries/LibraryDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -75,7 +76,7 @@ private: static Field getDictID() { return UUIDHelpers::generateV4(); } - Poco::Logger * log; + LoggerPtr log; const DictionaryStructure dict_struct; const std::string config_prefix; diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp index 1d706c176d..38a983d2c7 100644 --- a/src/Dictionaries/MySQLDictionarySource.cpp +++ b/src/Dictionaries/MySQLDictionarySource.cpp @@ -87,7 +87,7 @@ MySQLDictionarySource::MySQLDictionarySource( mysqlxx::PoolWithFailoverPtr pool_, const Block & sample_block_, const StreamSettings & settings_) - : log(&Poco::Logger::get("MySQLDictionarySource")) + : log(getLogger("MySQLDictionarySource")) , update_time(std::chrono::system_clock::from_time_t(0)) , dict_struct(dict_struct_) , configuration(configuration_) @@ -101,7 +101,7 @@ MySQLDictionarySource::MySQLDictionarySource( /// copy-constructor is provided in order to support cloneability MySQLDictionarySource::MySQLDictionarySource(const MySQLDictionarySource & other) - : log(&Poco::Logger::get("MySQLDictionarySource")) + : log(getLogger("MySQLDictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) diff --git a/src/Dictionaries/MySQLDictionarySource.h b/src/Dictionaries/MySQLDictionarySource.h index 80149d1d20..cca7adb5b6 100644 --- a/src/Dictionaries/MySQLDictionarySource.h +++ b/src/Dictionaries/MySQLDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if !defined(ARCADIA_BUILD) @@ -83,7 +84,7 @@ private: // execute invalidate_query. 
expects single cell in result std::string doInvalidateQuery(const std::string & request) const; - Poco::Logger * log; + LoggerPtr log; std::chrono::time_point update_time; const DictionaryStructure dict_struct; diff --git a/src/Dictionaries/PolygonDictionaryUtils.cpp b/src/Dictionaries/PolygonDictionaryUtils.cpp index b07bf69c54..e2ffd27eb5 100644 --- a/src/Dictionaries/PolygonDictionaryUtils.cpp +++ b/src/Dictionaries/PolygonDictionaryUtils.cpp @@ -67,7 +67,7 @@ const FinalCellWithSlabs * FinalCellWithSlabs::find(Coord, Coord) const SlabsPolygonIndex::SlabsPolygonIndex( const std::vector & polygons) - : log(&Poco::Logger::get("SlabsPolygonIndex")), + : log(getLogger("SlabsPolygonIndex")), sorted_x(uniqueX(polygons)) { indexBuild(polygons); diff --git a/src/Dictionaries/PolygonDictionaryUtils.h b/src/Dictionaries/PolygonDictionaryUtils.h index 865d78a7ca..dd41811842 100644 --- a/src/Dictionaries/PolygonDictionaryUtils.h +++ b/src/Dictionaries/PolygonDictionaryUtils.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -81,7 +82,7 @@ private: /** Auxiliary function for adding ring to the index */ void indexAddRing(const Ring & ring, size_t polygon_id); - Poco::Logger * log; + LoggerPtr log; /** Sorted distinct coordinates of all vertices */ std::vector sorted_x; diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index ed80f67df8..a0a82e50ef 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -55,7 +55,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource( , configuration(configuration_) , pool(std::move(pool_)) , sample_block(sample_block_) - , log(&Poco::Logger::get("PostgreSQLDictionarySource")) + , log(getLogger("PostgreSQLDictionarySource")) , query_builder(makeExternalQueryBuilder(dict_struct, configuration.schema, configuration.table, configuration.where)) , load_all_query(query_builder.composeLoadAllQuery()) { @@ 
-68,7 +68,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar , configuration(other.configuration) , pool(other.pool) , sample_block(other.sample_block) - , log(&Poco::Logger::get("PostgreSQLDictionarySource")) + , log(getLogger("PostgreSQLDictionarySource")) , query_builder(makeExternalQueryBuilder(dict_struct, configuration.schema, configuration.table, configuration.where)) , load_all_query(query_builder.composeLoadAllQuery()) , update_time(other.update_time) diff --git a/src/Dictionaries/PostgreSQLDictionarySource.h b/src/Dictionaries/PostgreSQLDictionarySource.h index 0dc98f92c8..a7817c1f15 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.h +++ b/src/Dictionaries/PostgreSQLDictionarySource.h @@ -1,6 +1,7 @@ #pragma once #if !defined(ARCADIA_BUILD) +#include #include "config_core.h" #endif #include "DictionaryStructure.h" @@ -63,7 +64,7 @@ private: const Configuration configuration; postgres::PoolWithFailoverPtr pool; Block sample_block; - Poco::Logger * log; + LoggerPtr log; ExternalQueryBuilder query_builder; const std::string load_all_query; std::chrono::time_point update_time; diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index 83ce0f7e5b..52a503f786 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -104,7 +104,7 @@ XDBCDictionarySource::XDBCDictionarySource( ContextPtr context_, const BridgeHelperPtr bridge_) : WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get(bridge_->getName() + "DictionarySource")) + , log(getLogger(bridge_->getName() + "DictionarySource")) , update_time(std::chrono::system_clock::from_time_t(0)) , dict_struct(dict_struct_) , configuration(configuration_) @@ -123,7 +123,7 @@ XDBCDictionarySource::XDBCDictionarySource( /// copy-constructor is provided in order to support cloneability XDBCDictionarySource::XDBCDictionarySource(const XDBCDictionarySource & other) : 
WithContext(other.getContext()) - , log(&Poco::Logger::get(other.bridge_helper->getName() + "DictionarySource")) + , log(getLogger(other.bridge_helper->getName() + "DictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) diff --git a/src/Dictionaries/XDBCDictionarySource.h b/src/Dictionaries/XDBCDictionarySource.h index d601d93085..808afc9d8f 100644 --- a/src/Dictionaries/XDBCDictionarySource.h +++ b/src/Dictionaries/XDBCDictionarySource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -75,7 +76,7 @@ private: BlockInputStreamPtr loadFromQuery(const Poco::URI & url, const Block & required_sample_block, const std::string & query) const; - Poco::Logger * log; + LoggerPtr log; std::chrono::time_point update_time; const DictionaryStructure dict_struct; diff --git a/src/Disks/DiskByteS3.cpp b/src/Disks/DiskByteS3.cpp index 296f666956..e92edd3269 100644 --- a/src/Disks/DiskByteS3.cpp +++ b/src/Disks/DiskByteS3.cpp @@ -104,7 +104,7 @@ private: std::filesystem::path prefix; size_t idx {0}; - Poco::Logger * log {&Poco::Logger::get("DiskByteS3DirectoryIterator")}; + LoggerPtr log{getLogger("DiskByteS3DirectoryIterator")}; }; class DiskByteS3Reservation : public IReservation diff --git a/src/Disks/DiskByteS3.h b/src/Disks/DiskByteS3.h index 7f782aa183..d2fdcb946f 100644 --- a/src/Disks/DiskByteS3.h +++ b/src/Disks/DiskByteS3.h @@ -16,7 +16,7 @@ #pragma once #include -#include +#include #include #include #include @@ -136,7 +136,7 @@ private: static String trimPrefix(const String& prefix, const String& key); - Poco::Logger * log = &Poco::Logger::get("DiskByteS3"); + LoggerPtr log = getLogger("DiskByteS3"); const UInt64 disk_id; String name; diff --git a/src/Disks/DiskCacheWrapper.h b/src/Disks/DiskCacheWrapper.h index b9663fa5c1..13ea1eaeee 100644 --- a/src/Disks/DiskCacheWrapper.h +++ b/src/Disks/DiskCacheWrapper.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include 
"DiskDecorator.h" @@ -84,7 +85,7 @@ private: /// Protects concurrent downloading files to cache. mutable std::mutex mutex; - Poco::Logger * log = &Poco::Logger::get("DiskCache"); + LoggerPtr log = getLogger("DiskCache"); }; } diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index 3fed0b43f9..e11dc3b470 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -464,7 +464,7 @@ void registerDiskLocal(DiskFactory & factory) if (!fs::exists(tmp_path)) { - LOG_WARNING(&Poco::Logger::get("DiskLocal"), "Can't find path {} so keep-free is forced to set zero", tmp_path); + LOG_WARNING(getLogger("DiskLocal"), "Can't find path {} so keep-free is forced to set zero", tmp_path); return std::make_shared(name, path, DiskStats{}); } diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 2fe954bbeb..ac6a48d98c 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -135,7 +136,7 @@ private: static std::mutex reservation_mutex; - Poco::Logger * log = &Poco::Logger::get("DiskLocal"); + LoggerPtr log = getLogger("DiskLocal"); }; } diff --git a/src/Disks/DiskRestartProxy.h b/src/Disks/DiskRestartProxy.h index b6a86cab7e..242464c693 100644 --- a/src/Disks/DiskRestartProxy.h +++ b/src/Disks/DiskRestartProxy.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -92,7 +93,7 @@ private: /// Mutex to protect RW access. 
mutable std::shared_timed_mutex mutex; - Poco::Logger * log = &Poco::Logger::get("DiskRestartProxy"); + LoggerPtr log = getLogger("DiskRestartProxy"); }; } diff --git a/src/Disks/DiskSelector.cpp b/src/Disks/DiskSelector.cpp index 15fbb1c828..a85465a143 100644 --- a/src/Disks/DiskSelector.cpp +++ b/src/Disks/DiskSelector.cpp @@ -149,7 +149,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig( } writeString(" disappeared from configuration, this change will be applied after restart of ClickHouse", warning); - LOG_WARNING(&Poco::Logger::get("DiskSelector"), warning.str()); + LOG_WARNING(getLogger("DiskSelector"), warning.str()); } result->flushDiskInfo(); diff --git a/src/Disks/HDFS/DiskByteHDFS.h b/src/Disks/HDFS/DiskByteHDFS.h index 35da5906e1..a16b80218f 100644 --- a/src/Disks/HDFS/DiskByteHDFS.h +++ b/src/Disks/HDFS/DiskByteHDFS.h @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include #include @@ -106,7 +106,7 @@ public: private: inline String absolutePath(const String& relative_path) const; - Poco::Logger * log = &Poco::Logger::get("DiskByteHDFS"); + LoggerPtr log = getLogger("DiskByteHDFS"); const String disk_name; const String disk_path; diff --git a/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp index f2f3a1a9db..5da9558466 100644 --- a/src/Disks/IDisk.cpp +++ b/src/Disks/IDisk.cpp @@ -28,7 +28,7 @@ bool IDisk::isDirectoryEmpty(const String & path) void copyFile(IDisk & from_disk, const String & from_path, IDisk & to_disk, const String & to_path) { - LOG_DEBUG(&Poco::Logger::get("IDisk"), "Copying from {} (path: {}) {} to {} (path: {}) {}.", + LOG_DEBUG(getLogger("IDisk"), "Copying from {} (path: {}) {} to {} (path: {}) {}.", from_disk.getName(), from_disk.getPath(), from_path, to_disk.getName(), to_disk.getPath(), to_path); auto in = from_disk.readFile(from_path); diff --git a/src/Disks/IDiskRemote.cpp b/src/Disks/IDiskRemote.cpp index b0dc274727..87a78af911 100644 --- a/src/Disks/IDiskRemote.cpp +++ b/src/Disks/IDiskRemote.cpp @@ -313,7 +313,7 
@@ IDiskRemote::IDiskRemote( const String & log_name_, size_t thread_pool_size) : IDisk(std::make_unique(log_name_, thread_pool_size)) - , log(&Poco::Logger::get(log_name_)) + , log(getLogger(log_name_)) , name(name_) , remote_fs_root_path(remote_fs_root_path_) , metadata_path(metadata_path_) diff --git a/src/Disks/IDiskRemote.h b/src/Disks/IDiskRemote.h index e06f9d4ca5..8f6b8746b6 100644 --- a/src/Disks/IDiskRemote.h +++ b/src/Disks/IDiskRemote.h @@ -22,6 +22,7 @@ #pragma once #if !defined(ARCADIA_BUILD) +#include #include #endif @@ -145,7 +146,7 @@ public: virtual RemoteFSPathKeeperPtr createFSPathKeeper() const = 0; protected: - Poco::Logger * log; + LoggerPtr log; const String name; const String remote_fs_root_path; diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp index f9be9adcd8..a804330559 100644 --- a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp +++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp @@ -53,7 +53,7 @@ AsynchronousBoundedReadBuffer::AsynchronousBoundedReadBuffer( , read_settings(settings_) , reader(reader_) , prefetch_buffer(settings_.remote_fs_buffer_size) - , log(&Poco::Logger::get("AsynchronousBoundedReadBuffer")) + , log(getLogger("AsynchronousBoundedReadBuffer")) , async_read_counters(async_read_counters_) { ProfileEvents::increment(ProfileEvents::RemoteFSBuffers); diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.h b/src/Disks/IO/AsynchronousBoundedReadBuffer.h index 6b78496f1e..28738ab37b 100644 --- a/src/Disks/IO/AsynchronousBoundedReadBuffer.h +++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -64,7 +65,7 @@ private: Memory<> prefetch_buffer; std::future prefetch_future; - Poco::Logger * log; + LoggerPtr log; AsyncReadCountersPtr async_read_counters; diff --git a/src/Disks/IO/IOUringReader.cpp b/src/Disks/IO/IOUringReader.cpp index acb33e4d5b..551253aa8a 100644 --- a/src/Disks/IO/IOUringReader.cpp 
+++ b/src/Disks/IO/IOUringReader.cpp @@ -50,7 +50,7 @@ namespace ErrorCodes } IOUringReader::IOUringReader(uint32_t entries_, bool enable_sqpoll_) - : enable_sqpoll(enable_sqpoll_), enable_iopoll(false), log(&Poco::Logger::get("IOUringReader")) + : enable_sqpoll(enable_sqpoll_), enable_iopoll(false), log(getLogger("IOUringReader")) { struct io_uring_probe * probe = io_uring_get_probe(); if (!probe) diff --git a/src/Disks/IO/IOUringReader.h b/src/Disks/IO/IOUringReader.h index 5f27dcb20f..20b8bd5334 100644 --- a/src/Disks/IO/IOUringReader.h +++ b/src/Disks/IO/IOUringReader.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -79,7 +80,7 @@ private: std::mutex submit_lock; std::deque pending_requests; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Disks/S3/ProxyListConfiguration.cpp b/src/Disks/S3/ProxyListConfiguration.cpp index 365df67727..7b03cc6c81 100644 --- a/src/Disks/S3/ProxyListConfiguration.cpp +++ b/src/Disks/S3/ProxyListConfiguration.cpp @@ -22,7 +22,7 @@ ClientConfigurationPerRequest ProxyListConfiguration::getConfiguration(const Aws cfg.proxy_host = proxies[index].getHost(); cfg.proxy_port = proxies[index].getPort(); - LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}", proxies[index].toString()); + LOG_DEBUG(getLogger("AWSClient"), "Use proxy: {}", proxies[index].toString()); return cfg; } diff --git a/src/Disks/S3/ProxyResolverConfiguration.cpp b/src/Disks/S3/ProxyResolverConfiguration.cpp index 5b8ac99e37..8708afff75 100644 --- a/src/Disks/S3/ProxyResolverConfiguration.cpp +++ b/src/Disks/S3/ProxyResolverConfiguration.cpp @@ -23,7 +23,7 @@ ProxyResolverConfiguration::ProxyResolverConfiguration(const Poco::URI & endpoin ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const Aws::Http::HttpRequest &) { - LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString()); + LOG_DEBUG(getLogger("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString()); 
/// 1 second is enough for now. /// TODO: Make timeouts configurable. @@ -51,7 +51,7 @@ ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const /// Read proxy host as string from response body. Poco::StreamCopier::copyToString(response_body_stream, proxy_host); - LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}://{}:{}", proxy_scheme, proxy_host, proxy_port); + LOG_DEBUG(getLogger("AWSClient"), "Use proxy: {}://{}:{}", proxy_scheme, proxy_host, proxy_port); cfg.proxy_scheme = Aws::Http::SchemeMapper::FromString(proxy_scheme.c_str()); cfg.proxy_host = proxy_host; diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp index 98964f005f..d5025e7aca 100644 --- a/src/Disks/S3/registerDiskS3.cpp +++ b/src/Disks/S3/registerDiskS3.cpp @@ -80,7 +80,7 @@ std::shared_ptr getProxyResolverConfiguration( throw Exception("Only HTTP/HTTPS schemas allowed in proxy resolver config: " + proxy_scheme, ErrorCodes::BAD_ARGUMENTS); auto proxy_port = proxy_resolver_config.getUInt(prefix + ".proxy_port"); - LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy resolver: {}, Scheme: {}, Port: {}", + LOG_DEBUG(getLogger("DiskS3"), "Configured proxy resolver: {}, Scheme: {}, Port: {}", endpoint.toString(), proxy_scheme, proxy_port); return std::make_shared(endpoint, proxy_scheme, proxy_port); @@ -105,7 +105,7 @@ std::shared_ptr getProxyListConfiguration( proxies.push_back(proxy_uri); - LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy: {}", proxy_uri.toString()); + LOG_DEBUG(getLogger("DiskS3"), "Configured proxy: {}", proxy_uri.toString()); } if (!proxies.empty()) diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 5597c42ba8..317ebbfb73 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -405,7 +405,7 @@ StoragePolicySelector::StoragePolicySelector( */ policies.emplace(name, std::make_shared(name, config, config_prefix + "." 
+ name, disks)); - LOG_INFO(&Poco::Logger::get("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name)); + LOG_INFO(getLogger("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name)); } /// Add default policy if it isn't explicitly specified. diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp index 77be1c1b0f..adb53e7c7d 100644 --- a/src/Disks/VolumeJBOD.cpp +++ b/src/Disks/VolumeJBOD.cpp @@ -20,7 +20,7 @@ VolumeJBOD::VolumeJBOD( DiskSelectorPtr disk_selector) : IVolume(name_, config, config_prefix, disk_selector) { - Poco::Logger * logger = &Poco::Logger::get("StorageConfiguration"); + LoggerPtr logger = getLogger("StorageConfiguration"); auto has_max_bytes = config.has(config_prefix + ".max_data_part_size_bytes"); auto has_max_ratio = config.has(config_prefix + ".max_data_part_size_ratio"); diff --git a/src/ExternalCatalog/CnchExternalCatalogMgr.h b/src/ExternalCatalog/CnchExternalCatalogMgr.h index 1e72099968..f21a90d686 100644 --- a/src/ExternalCatalog/CnchExternalCatalogMgr.h +++ b/src/ExternalCatalog/CnchExternalCatalogMgr.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -35,6 +36,6 @@ private: std::string name_space; std::map catalog_confs; // use for check whether the catalog has been changed. std::map catalogs; // map from catalog name to externcal catalog ptr. 
- Poco::Logger * log = &Poco::Logger::get("CnchExternalCatalogMgr"); + LoggerPtr log = getLogger("CnchExternalCatalogMgr"); }; } diff --git a/src/ExternalCatalog/ExternalCatalogMgr.cpp b/src/ExternalCatalog/ExternalCatalogMgr.cpp index 58b399ce73..fa8c82cd8b 100644 --- a/src/ExternalCatalog/ExternalCatalogMgr.cpp +++ b/src/ExternalCatalog/ExternalCatalogMgr.cpp @@ -20,7 +20,7 @@ namespace Mgr { //TODO(renming):: add more implementation auto key_mgr_type = configPrefix() + ".type"; - auto * log = &Poco::Logger::get("ExternalCatalogMgr"); + auto log = getLogger("ExternalCatalogMgr"); if (!conf.has(key_mgr_type)) { throw Exception(fmt::format("No {} in config", key_mgr_type), ErrorCodes::BAD_ARGUMENTS); diff --git a/src/ExternalCatalog/HiveExternalCatalog.h b/src/ExternalCatalog/HiveExternalCatalog.h index 8d3b806a3c..5214328742 100644 --- a/src/ExternalCatalog/HiveExternalCatalog.h +++ b/src/ExternalCatalog/HiveExternalCatalog.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -36,7 +37,7 @@ private: IMetaClientPtr hms_client; //TODO(ExternalCatalog):: add storage related field. 
- Poco::Logger * log = &Poco::Logger::get("HiveExternalCatalog"); + LoggerPtr log = getLogger("HiveExternalCatalog"); }; } diff --git a/src/ExternalCatalog/MockExternalCatalog.cpp b/src/ExternalCatalog/MockExternalCatalog.cpp index e2f3b2f836..283e0286fe 100644 --- a/src/ExternalCatalog/MockExternalCatalog.cpp +++ b/src/ExternalCatalog/MockExternalCatalog.cpp @@ -33,7 +33,7 @@ StoragePtr createStorageFromCreateQuery([[maybe_unused]] const String & catalog, {}, {},false); ret->setCreateTableSql(create_table_query); - LOG_DEBUG(&Poco::Logger::get("createStorageFromCreateQuery"), "create table from {} ", create_table_query); + LOG_DEBUG(getLogger("createStorageFromCreateQuery"), "create table from {} ", create_table_query); return ret; } diff --git a/src/ExternalCatalog/MockExternalCatalog.h b/src/ExternalCatalog/MockExternalCatalog.h index 63f70cd736..ea1a630730 100644 --- a/src/ExternalCatalog/MockExternalCatalog.h +++ b/src/ExternalCatalog/MockExternalCatalog.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -38,7 +39,7 @@ public: private: std::string catalog_name; - Poco::Logger * log = &Poco::Logger::get("MockExternalCatalog"); + LoggerPtr log = getLogger("MockExternalCatalog"); }; } diff --git a/src/FormaterTool/HDFSDumper.h b/src/FormaterTool/HDFSDumper.h index 4e9e54ea2a..6be0b9e6c1 100644 --- a/src/FormaterTool/HDFSDumper.h +++ b/src/FormaterTool/HDFSDumper.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -50,7 +51,7 @@ public: void getFileFromRemote(const String & remote_path, const String & local_path); private: - Poco::Logger * log = &Poco::Logger::get("HDFSDumper"); + LoggerPtr log = getLogger("HDFSDumper"); size_t buffer_size; HDFSConnectionParams hdfs_params; std::unique_ptr hdfs_filesystem = nullptr; diff --git a/src/FormaterTool/PartConverter.h b/src/FormaterTool/PartConverter.h index 50a17213c0..b2fb84e168 100644 --- a/src/FormaterTool/PartConverter.h +++ b/src/FormaterTool/PartConverter.h @@ -14,6 +14,7 
@@ */ #pragma once +#include #include #include @@ -28,7 +29,7 @@ public: void execute() override; private: - Poco::Logger * log = &Poco::Logger::get("PartConverter"); + LoggerPtr log = getLogger("PartConverter"); String source_path; String target_path; String data_format; diff --git a/src/FormaterTool/PartMergerImpl.cpp b/src/FormaterTool/PartMergerImpl.cpp index af9f9724ad..1973b7d549 100644 --- a/src/FormaterTool/PartMergerImpl.cpp +++ b/src/FormaterTool/PartMergerImpl.cpp @@ -79,7 +79,7 @@ auto PartMergerImpl::createStorages(const std::vector & uuids, const Str return res; } -PartMergerImpl::PartMergerImpl(ContextMutablePtr context_, Poco::Util::AbstractConfiguration & config, Poco::Logger * log_) +PartMergerImpl::PartMergerImpl(ContextMutablePtr context_, Poco::Util::AbstractConfiguration & config, LoggerPtr log_) : PartToolkitBase(nullptr, context_), log(log_) { // Init arguments passed from CLI. diff --git a/src/FormaterTool/PartMergerImpl.h b/src/FormaterTool/PartMergerImpl.h index c5402a01cd..21d7a5ba28 100644 --- a/src/FormaterTool/PartMergerImpl.h +++ b/src/FormaterTool/PartMergerImpl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include "Common/ErrorCodes.h" @@ -69,7 +70,7 @@ class PartMergerImpl : public PartToolkitBase using StorageCloudMergeTreePtr = std::shared_ptr; public: - PartMergerImpl(ContextMutablePtr context, Poco::Util::AbstractConfiguration & config, Poco::Logger * log_); + PartMergerImpl(ContextMutablePtr context, Poco::Util::AbstractConfiguration & config, LoggerPtr log_); /** * Main entry for selecting and merging. 
@@ -99,7 +100,7 @@ private: void executeMergeTask(MergeTreeMetaBase & merge_tree, DiskPtr & disk, const MergeTask & task); PartMergerParams params; - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/FormaterTool/PartWriter.h b/src/FormaterTool/PartWriter.h index e7aeb16116..ca62eacbdd 100644 --- a/src/FormaterTool/PartWriter.h +++ b/src/FormaterTool/PartWriter.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -45,7 +46,7 @@ public: } private: - Poco::Logger * log = &Poco::Logger::get("PartWriter"); + LoggerPtr log = getLogger("PartWriter"); String source_path; String data_format; String dest_path; diff --git a/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp index 0fcdb3f128..47486c08c5 100644 --- a/src/Formats/MySQLBlockInputStream.cpp +++ b/src/Formats/MySQLBlockInputStream.cpp @@ -55,7 +55,7 @@ MySQLBlockInputStream::MySQLBlockInputStream( const std::string & query_str, const Block & sample_block, const StreamSettings & settings_) - : log(&Poco::Logger::get("MySQLBlockInputStream")) + : log(getLogger("MySQLBlockInputStream")) , connection{std::make_unique(entry, query_str)} , settings{std::make_unique(settings_)} { @@ -65,7 +65,7 @@ MySQLBlockInputStream::MySQLBlockInputStream( /// For descendant MySQLWithFailoverBlockInputStream MySQLBlockInputStream::MySQLBlockInputStream(const Block &sample_block_, const StreamSettings & settings_) - : log(&Poco::Logger::get("MySQLBlockInputStream")) + : log(getLogger("MySQLBlockInputStream")) , settings(std::make_unique(settings_)) { description.init(sample_block_); diff --git a/src/Formats/MySQLBlockInputStream.h b/src/Formats/MySQLBlockInputStream.h index 12deb9c314..09344eda4b 100644 --- a/src/Formats/MySQLBlockInputStream.h +++ b/src/Formats/MySQLBlockInputStream.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -52,7 +53,7 @@ protected: mysqlxx::UseQueryResult result; }; - Poco::Logger * log; + LoggerPtr log; std::unique_ptr connection; 
const std::unique_ptr settings; diff --git a/src/Functions/FunctionsRuntimeFilter.h b/src/Functions/FunctionsRuntimeFilter.h index fe972ddb2d..1082e5eac4 100644 --- a/src/Functions/FunctionsRuntimeFilter.h +++ b/src/Functions/FunctionsRuntimeFilter.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -30,7 +31,7 @@ public: static FunctionPtr create(ContextPtr context) { return std::make_shared(std::move(context)); } explicit RuntimeFilterBloomFilterExists(ContextPtr context_) - : context(std::move(context_)), log(&Poco::Logger::get("RuntimeFilterBloomFilterExists")) + : context(std::move(context_)), log(getLogger("RuntimeFilterBloomFilterExists")) {} String getName() const override { return name; } @@ -122,7 +123,7 @@ public: private: ContextPtr context; - Poco::Logger * log; + LoggerPtr log; mutable std::mutex mu; mutable String dynamic_value_key; diff --git a/src/Functions/IP2GeoUDF.h b/src/Functions/IP2GeoUDF.h index c3a93b003d..46b70e5254 100644 --- a/src/Functions/IP2GeoUDF.h +++ b/src/Functions/IP2GeoUDF.h @@ -13,6 +13,7 @@ * limitations under the License. 
*/ +#include #include #include #include @@ -56,7 +57,7 @@ class IP2GeoManager public: IP2GeoManager() { - log = &Poco::Logger::get("IP2GeoUDF"); + log = getRawLogger("IP2GeoUDF"); location_map["country"] = 0; location_map["province"] = 1; location_map["city"] = 2; @@ -103,7 +104,7 @@ public: std::unordered_map & getLocationMap() { return location_map; } private: - Poco::Logger * log; + LoggerRawPtr log; Poco::Util::Timer timer; mutable std::shared_mutex mutex_; @@ -155,10 +156,10 @@ private: int ipiperr; int ipv6err; ipdb_reader *ipipreader, *ipv6reader; - Poco::Logger * log; + LoggerRawPtr log; mutable std::shared_mutex ipipmutex_; - IPIPLocator() { log = &Poco::Logger::get("IPIPLocator"); } + IPIPLocator() { log = getRawLogger("IPIPLocator"); } }; class GeoIPLocator @@ -193,10 +194,10 @@ private: MMDB_s getISPMMDB_s() { return ispmmdb; } private: - Poco::Logger * log; + LoggerRawPtr log; mutable std::shared_mutex geoipmutex_; MMDB_s citymmdb, ispmmdb, asnmmdb; - GeoIPLocator() { log = &Poco::Logger::get("GEOIPLocator"); } + GeoIPLocator() { log = getRawLogger("GEOIPLocator"); } }; }; @@ -219,7 +220,7 @@ public: IP2GeoUDF(ContextPtr cur_context) : context(cur_context) { - log = &Poco::Logger::get("IP2GeoFunc"); + log = getLogger("IP2GeoFunc"); IP2GeoManager::getInstance().getSettings(context); } @@ -365,6 +366,6 @@ public: private: ContextPtr context; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Functions/invalidateStatsCache.cpp b/src/Functions/invalidateStatsCache.cpp index 73f3d640e8..e53fe47f53 100644 --- a/src/Functions/invalidateStatsCache.cpp +++ b/src/Functions/invalidateStatsCache.cpp @@ -67,7 +67,7 @@ ColumnPtr FunctionInvalidateStatsCache::executeImpl( if (!table_identifier_opt.has_value()) { // DO NOTHING - LOG_INFO(&Poco::Logger::get("invalidateStatsCache"), "Table " + identifier_names[0] + "." + identifier_names[1] + " not found, skip cache clear"); + LOG_INFO(getLogger("invalidateStatsCache"), "Table " + identifier_names[0] + "." 
+ identifier_names[1] + " not found, skip cache clear"); } else { diff --git a/src/Functions/logTrace.cpp b/src/Functions/logTrace.cpp index f8d20fde89..f55b087e06 100644 --- a/src/Functions/logTrace.cpp +++ b/src/Functions/logTrace.cpp @@ -47,7 +47,7 @@ namespace throw Exception( "First argument for function " + getName() + " must be Constant string", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - static auto * log = &Poco::Logger::get("FunctionLogTrace"); + static auto log = getLogger("FunctionLogTrace"); LOG_TRACE(log, message); return DataTypeUInt8().createColumnConst(input_rows_count, 0); diff --git a/src/IO/AIOContextPool.cpp b/src/IO/AIOContextPool.cpp index 99fdfc67d9..92b49cd0d9 100644 --- a/src/IO/AIOContextPool.cpp +++ b/src/IO/AIOContextPool.cpp @@ -98,7 +98,7 @@ void AIOContextPool::fulfillPromises(const io_event events[], const int num_even const auto it = promises.find(completed_id); if (it == std::end(promises)) { - LOG_ERROR(&Poco::Logger::get("AIOcontextPool"), "Found io_event with unknown id {}", completed_id); + LOG_ERROR(getLogger("AIOcontextPool"), "Found io_event with unknown id {}", completed_id); continue; } diff --git a/src/IO/HDFSRemoteFSReader.cpp b/src/IO/HDFSRemoteFSReader.cpp index c77681bfb3..c7bf0bd303 100644 --- a/src/IO/HDFSRemoteFSReader.cpp +++ b/src/IO/HDFSRemoteFSReader.cpp @@ -28,7 +28,7 @@ namespace ErrorCodes { static void ReadBufferFromHdfsCallBack(const hdfsEvent & event) { - LOG_TRACE(&Poco::Logger::get("ReadBufferFromHDFS"), fmt::format("get event {} with value {}", + LOG_TRACE(getLogger("ReadBufferFromHDFS"), fmt::format("get event {} with value {}", event.eventType, event.value)); switch (event.eventType) { diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 6733c7803d..303889cc0c 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -141,7 +141,7 @@ namespace size_t max_pool_size_, bool wait_on_pool_size_limit_, bool resolve_host_ = true) - : Base(max_pool_size_, 
&Poco::Logger::get("HTTPSessionPool"), wait_on_pool_size_limit_ ? BehaviourOnLimit::Wait : BehaviourOnLimit::AllocateNewBypassingPool) + : Base(max_pool_size_, getLogger("HTTPSessionPool"), wait_on_pool_size_limit_ ? BehaviourOnLimit::Wait : BehaviourOnLimit::AllocateNewBypassingPool) , host(host_) , port(port_) , https(https_) @@ -251,7 +251,7 @@ namespace auto msg = Poco::AnyCast(session_data); if (!msg.empty()) { - LOG_TRACE((&Poco::Logger::get("HTTPCommon")), "Failed communicating with {} with error '{}' will try to reconnect session", host, msg); + LOG_TRACE((getLogger("HTTPCommon")), "Failed communicating with {} with error '{}' will try to reconnect session", host, msg); if (resolve_host) { diff --git a/src/IO/HTTPSender.cpp b/src/IO/HTTPSender.cpp index 1a31478ab2..a743561fdf 100644 --- a/src/IO/HTTPSender.cpp +++ b/src/IO/HTTPSender.cpp @@ -19,7 +19,7 @@ HTTPSender::HTTPSender( void HTTPSender::send(const std::string & body) { - LOG_TRACE((&Poco::Logger::get("HTTPSender")), "Sending request to {}", request.getURI()); + LOG_TRACE((getLogger("HTTPSender")), "Sending request to {}", request.getURI()); if (body.empty()) { diff --git a/src/IO/RAReadBufferFromS3.cpp b/src/IO/RAReadBufferFromS3.cpp index e5b25c7874..7e5a9d3ec8 100644 --- a/src/IO/RAReadBufferFromS3.cpp +++ b/src/IO/RAReadBufferFromS3.cpp @@ -49,7 +49,7 @@ RAReadBufferFromS3::RAReadBufferFromS3(const std::shared_ptr& ReadBufferFromFileBase(buffer_size, existing_memory, alignment), throttler_(throttler), read_retry_(read_retry), reader_(client, bucket, key, buffer_size, max_buffer_expand_times, - read_expand_pct, seq_read_thres, &Poco::Logger::get("RAReadBufferFromS3")) {} + read_expand_pct, seq_read_thres, getLogger("RAReadBufferFromS3")) {} bool RAReadBufferFromS3::nextImpl() { diff --git a/src/IO/ReadBufferFromFileWithNexusFS.h b/src/IO/ReadBufferFromFileWithNexusFS.h index 950d067364..2c50b1c965 100644 --- a/src/IO/ReadBufferFromFileWithNexusFS.h +++ b/src/IO/ReadBufferFromFileWithNexusFS.h @@ 
-38,7 +38,7 @@ public: bool isSeekCheap() override { return false; } private: - Poco::Logger * log = &Poco::Logger::get("ReadBufferFromFileWithNexusFS"); + LoggerPtr log = getLogger("ReadBufferFromFileWithNexusFS"); const String file_name; std::unique_ptr source_read_buffer; diff --git a/src/IO/ReadBufferFromRpcStreamFile.h b/src/IO/ReadBufferFromRpcStreamFile.h index 3a5b80a404..ca6749b5b2 100644 --- a/src/IO/ReadBufferFromRpcStreamFile.h +++ b/src/IO/ReadBufferFromRpcStreamFile.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -32,7 +33,7 @@ public: private: std::shared_ptr client; size_t current_file_offset{0}; - Poco::Logger * log = &Poco::Logger::get("ReadBufferFromRpcStreamFile"); + LoggerPtr log = getLogger("ReadBufferFromRpcStreamFile"); }; } diff --git a/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h index f6ce51c909..d0ab9e2dc6 100644 --- a/src/IO/ReadBufferFromS3.h +++ b/src/IO/ReadBufferFromS3.h @@ -1,6 +1,7 @@ #pragma once #if !defined(ARCADIA_BUILD) +#include #include #endif @@ -35,7 +36,7 @@ private: std::optional read_result; std::unique_ptr impl; - Poco::Logger * log = &Poco::Logger::get("ReadBufferFromS3"); + LoggerPtr log = getLogger("ReadBufferFromS3"); public: explicit ReadBufferFromS3( diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 25746eae9c..a9d35ebbbc 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -124,7 +125,7 @@ namespace detail if (!credentials.getUsername().empty()) credentials.authenticate(request); - LOG_TRACE((&Poco::Logger::get("ReadWriteBufferFromHTTP")), "Sending request to {}", uri_.toString()); + LOG_TRACE((getLogger("ReadWriteBufferFromHTTP")), "Sending request to {}", uri_.toString()); auto sess = session->getSession(); diff --git a/src/IO/S3/AWSOptionsConfig.cpp b/src/IO/S3/AWSOptionsConfig.cpp index 1d7f0766ae..75ff37734f 100644 --- 
a/src/IO/S3/AWSOptionsConfig.cpp +++ b/src/IO/S3/AWSOptionsConfig.cpp @@ -18,7 +18,7 @@ Aws::Utils::Logging::LogLevel DB::S3::AWSOptionsConfig::convertStringToLogLevel( else if (log_level_str.compare("Trace") == 0) return Aws::Utils::Logging::LogLevel::Trace; else { - LOG_WARNING(&Poco::Logger::get("AWSOptionsConfig"), fmt::format("Illegal aws log level {}, please check.", log_level_str)); + LOG_WARNING(getLogger("AWSOptionsConfig"), fmt::format("Illegal aws log level {}, please check.", log_level_str)); return Aws::Utils::Logging::LogLevel::Off; } } diff --git a/src/IO/S3/Credentials.cpp b/src/IO/S3/Credentials.cpp index f0b78648d0..528b7e09a0 100644 --- a/src/IO/S3/Credentials.cpp +++ b/src/IO/S3/Credentials.cpp @@ -48,7 +48,7 @@ bool areCredentialsEmptyOrExpired(const Aws::Auth::AWSCredentials & credentials, AWSEC2MetadataClient::AWSEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration, const char * endpoint_) : Aws::Internal::AWSHttpResourceClient(client_configuration) , endpoint(endpoint_) - , logger(&Poco::Logger::get("AWSEC2InstanceProfileConfigLoader")) + , logger(getLogger("AWSEC2InstanceProfileConfigLoader")) { } @@ -197,7 +197,7 @@ Aws::String AWSEC2MetadataClient::getCurrentRegion() const std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration) { Aws::String ec2_metadata_service_endpoint = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT"); - auto * logger = &Poco::Logger::get("AWSEC2InstanceProfileConfigLoader"); + auto logger = getLogger("AWSEC2InstanceProfileConfigLoader"); if (ec2_metadata_service_endpoint.empty()) { Aws::String ec2_metadata_service_endpoint_mode = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"); @@ -235,7 +235,7 @@ std::shared_ptr InitEC2MetadataClient(const Aws::Client::C AWSEC2InstanceProfileConfigLoader::AWSEC2InstanceProfileConfigLoader(const std::shared_ptr & client_, bool use_secure_pull_) : client(client_) , 
use_secure_pull(use_secure_pull_) - , logger(&Poco::Logger::get("AWSEC2InstanceProfileConfigLoader")) + , logger(getLogger("AWSEC2InstanceProfileConfigLoader")) { } @@ -277,7 +277,7 @@ bool AWSEC2InstanceProfileConfigLoader::LoadInternal() AWSInstanceProfileCredentialsProvider::AWSInstanceProfileCredentialsProvider(const std::shared_ptr & config_loader) : ec2_metadata_config_loader(config_loader) , load_frequency_ms(Aws::Auth::REFRESH_THRESHOLD) - , logger(&Poco::Logger::get("AWSInstanceProfileCredentialsProvider")) + , logger(getLogger("AWSInstanceProfileCredentialsProvider")) { LOG_INFO(logger, "Creating Instance with injected EC2MetadataClient and refresh rate."); } @@ -321,7 +321,7 @@ void AWSInstanceProfileCredentialsProvider::refreshIfExpired() AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider::AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider( std::shared_ptr aws_client_configuration, uint64_t expiration_window_seconds_) - : logger(&Poco::Logger::get("AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider")) + : logger(getLogger("AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider")) , expiration_window_seconds(expiration_window_seconds_) { // check environment variables @@ -454,7 +454,7 @@ S3CredentialsProviderChain::S3CredentialsProviderChain( const Aws::Auth::AWSCredentials & credentials, CredentialsConfiguration credentials_configuration) { - auto * logger = &Poco::Logger::get("S3CredentialsProviderChain"); + auto logger = getLogger("S3CredentialsProviderChain"); /// we don't provide any credentials to avoid signing if (credentials_configuration.no_sign_request) diff --git a/src/IO/S3/Credentials.h b/src/IO/S3/Credentials.h index 3fa53e1119..0e104edf07 100644 --- a/src/IO/S3/Credentials.h +++ b/src/IO/S3/Credentials.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_AWS_S3 @@ -57,7 +58,7 @@ private: const Aws::String endpoint; mutable std::recursive_mutex token_mutex; mutable Aws::String token; - Poco::Logger * logger; + LoggerPtr logger; }; 
std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration); @@ -75,7 +76,7 @@ protected: private: std::shared_ptr client; bool use_secure_pull; - Poco::Logger * logger; + LoggerPtr logger; }; class AWSInstanceProfileCredentialsProvider : public Aws::Auth::AWSCredentialsProvider @@ -94,7 +95,7 @@ private: std::shared_ptr ec2_metadata_config_loader; Int64 load_frequency_ms; - Poco::Logger * logger; + LoggerPtr logger; }; class AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider : public Aws::Auth::AWSCredentialsProvider @@ -120,7 +121,7 @@ private: Aws::String session_name; Aws::String token; bool initialized = false; - Poco::Logger * logger; + LoggerPtr logger; uint64_t expiration_window_seconds; }; diff --git a/src/IO/S3/CustomCRTHttpClient.cpp b/src/IO/S3/CustomCRTHttpClient.cpp index 4168cac64a..83d3b4b78c 100644 --- a/src/IO/S3/CustomCRTHttpClient.cpp +++ b/src/IO/S3/CustomCRTHttpClient.cpp @@ -32,7 +32,7 @@ std::shared_ptr CustomCRTHttpClient::MakeRequest( ProfileEvents::increment(ProfileEvents::CRTHTTPS3GetCount); ProfileEvents::increment(ProfileEvents::CRTHTTPS3GetTime, total_watch.elapsedMicroseconds()); if (slow_read_ms_ > 0 && time >= slow_read_ms_ * 1000) { - Poco::Logger * log = &Poco::Logger::get("AWSClient"); + LoggerPtr log = getLogger("AWSClient"); LOG_DEBUG(log, fmt::format("AWS S3 slow read(over {}ms): {}, time = {}ms", slow_read_ms_, request->GetUri().GetURIString(), time/1000)); } diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index 11be811988..706f084eb4 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -133,7 +133,7 @@ void PocoHTTPClient::makeRequestInternal( Aws::Utils::RateLimits::RateLimiterInterface *, Aws::Utils::RateLimits::RateLimiterInterface *) const { - Poco::Logger * log = &Poco::Logger::get("AWSClient"); + LoggerPtr log = getLogger("AWSClient"); auto uri = request.GetUri().GetURIString(); LOG_DEBUG(log, "Make request to: {}", uri); diff 
--git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index e8a23bd5a5..3d1d6a44e8 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -121,7 +121,7 @@ public: AWSLogger(Aws::Utils::Logging::LogLevel log_level) { for (auto [tag, name] : S3_LOGGER_TAG_NAMES) - tag_loggers[tag] = &Poco::Logger::get(name); + tag_loggers[tag] = getLogger(name); default_logger = tag_loggers[S3_LOGGER_TAG_NAMES[0][0]]; log_level_ = log_level; @@ -157,8 +157,8 @@ public: void Flush() final { } private: - Poco::Logger * default_logger; - std::unordered_map tag_loggers; + LoggerPtr default_logger; + std::unordered_map tag_loggers; Aws::Utils::Logging::LogLevel log_level_; }; } @@ -1095,7 +1095,7 @@ namespace S3 const std::function & filter_, size_t max_threads_, size_t batch_clean_size_) - : logger(&Poco::Logger::get("S3LazyCleaner")) + : logger(getLogger("S3LazyCleaner")) , batch_clean_size(batch_clean_size_) , filter(filter_) , s3_util(s3_util_) @@ -1250,7 +1250,7 @@ namespace S3 use_insecure_imds_request = from.use_insecure_imds_request; } - bool processReadException(Exception & e, Poco::Logger * log, const String & bucket, const String & key, size_t offset, size_t attempt) + bool processReadException(Exception & e, LoggerPtr log, const String & bucket, const String & key, size_t offset, size_t attempt) { ProfileEvents::increment(ProfileEvents::S3ReadRequestsErrors); diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h index 50ba91e8cc..110b3482d3 100644 --- a/src/IO/S3Common.h +++ b/src/IO/S3Common.h @@ -18,6 +18,7 @@ # include # include # include +# include # include # include namespace Aws::S3 @@ -293,7 +294,7 @@ public: private: void lazyRemove(const std::optional & key_); - Poco::Logger * logger; + LoggerPtr logger; size_t batch_clean_size; std::function filter; @@ -335,7 +336,7 @@ struct AuthSettings }; /// return whether the exception worth retry or not -bool processReadException(Exception & e, Poco::Logger * log, const String & bucket, const String & key, size_t read_offset, 
size_t attempt); +bool processReadException(Exception & e, LoggerPtr log, const String & bucket, const String & key, size_t read_offset, size_t attempt); void resetSessionIfNeeded(bool read_all_range_successfully, std::optional & read_result); diff --git a/src/IO/S3RemoteFSReader.cpp b/src/IO/S3RemoteFSReader.cpp index 2904ed23fa..7e0fb9fbe1 100644 --- a/src/IO/S3RemoteFSReader.cpp +++ b/src/IO/S3RemoteFSReader.cpp @@ -117,7 +117,7 @@ uint64_t S3TrivialReader::seek(uint64_t offset) { S3ReadAheadReader::S3ReadAheadReader(const std::shared_ptr& client, const String& bucket, const String& key, size_t min_read_size, size_t max_read_expand_times, size_t read_expand_pct, - size_t seq_read_thres, Poco::Logger* logger): + size_t seq_read_thres, LoggerPtr logger): min_read_size_(min_read_size), max_read_expand_times_(max_read_expand_times), read_expand_pct_(read_expand_pct), seq_read_threshold_(seq_read_thres), logger_(logger), client_(client), bucket_(bucket), key_(key), @@ -388,7 +388,7 @@ S3RemoteFSReaderOpts::S3RemoteFSReaderOpts(const std::shared_ptr #include #include #include @@ -34,7 +35,7 @@ public: virtual const String& bucket() const = 0; virtual const String& key() const = 0; virtual std::shared_ptr client() const = 0; - virtual Poco::Logger * logger() const = 0; + virtual LoggerPtr logger() const = 0; }; class S3TrivialReader: public S3Reader { @@ -52,7 +53,7 @@ public: virtual const String& bucket() const override { return bucket_; } virtual const String& key() const override { return key_; } virtual std::shared_ptr client() const override { return client_; } - virtual Poco::Logger * logger() const override { return nullptr; } + virtual LoggerPtr logger() const override { return nullptr; } private: uint64_t readFragment(char* buffer, uint64_t offset, uint64_t size); @@ -70,7 +71,7 @@ public: S3ReadAheadReader(const std::shared_ptr& client, const String& bucket, const String& key, size_t min_read_size, size_t max_read_expand_times, size_t read_expand_pct, - 
size_t seq_read_thres, Poco::Logger* logger); + size_t seq_read_thres, LoggerPtr logger); virtual ~S3ReadAheadReader() override; @@ -84,7 +85,7 @@ public: virtual const String& bucket() const override { return bucket_; } virtual const String& key() const override { return key_; } virtual std::shared_ptr client() const override { return client_; } - virtual Poco::Logger * logger() const override { return logger_; } + virtual LoggerPtr logger() const override { return logger_; } private: void updateBufferSize(uint64_t size); @@ -98,7 +99,7 @@ private: const size_t read_expand_pct_; const size_t seq_read_threshold_; - Poco::Logger* logger_; + LoggerPtr logger_; std::shared_ptr client_; const String bucket_; @@ -161,7 +162,7 @@ struct S3RemoteFSReaderOpts: public RemoteFSReaderOpts { const std::shared_ptr client_; const String bucket_; - Poco::Logger* logger_; + LoggerPtr logger_; // Readahead options size_t ra_min_read_size_; diff --git a/src/IO/Scheduler/DeadlineScheduler.cpp b/src/IO/Scheduler/DeadlineScheduler.cpp index 1e67c2a136..875d6ca734 100644 --- a/src/IO/Scheduler/DeadlineScheduler.cpp +++ b/src/IO/Scheduler/DeadlineScheduler.cpp @@ -75,7 +75,7 @@ DeadlineScheduler::Options DeadlineScheduler::Options::parseFromConfig( } DeadlineScheduler::DeadlineScheduler(const Options& opts): - logger_(&Poco::Logger::get("DeadlineScheduler")), opts_(opts), + logger_(getLogger("DeadlineScheduler")), opts_(opts), last_retrieve_pos_(0, 0) { if (opts_.max_request_size_ % opts_.aligned_boundary_ != 0) { throw Exception(fmt::format("Max request size {} is not aligned to request boundary {}", diff --git a/src/IO/Scheduler/DeadlineScheduler.h b/src/IO/Scheduler/DeadlineScheduler.h index 926a9230a6..55b291818f 100644 --- a/src/IO/Scheduler/DeadlineScheduler.h +++ b/src/IO/Scheduler/DeadlineScheduler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -131,7 +132,7 @@ private: String schedulerStatus() const; - Poco::Logger* logger_; + LoggerPtr logger_; const 
Options opts_; diff --git a/src/IO/Scheduler/DispatchedIOWorkerPool.h b/src/IO/Scheduler/DispatchedIOWorkerPool.h index bdf3695870..6bd08928f5 100644 --- a/src/IO/Scheduler/DispatchedIOWorkerPool.h +++ b/src/IO/Scheduler/DispatchedIOWorkerPool.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -77,7 +78,7 @@ public: DispatchedIOWorkerPool(const Options& opts, const std::vector& schedulers): - opts_(opts), logger_(&Poco::Logger::get("DispatchedIOWorkerPool")), + opts_(opts), logger_(getLogger("DispatchedIOWorkerPool")), schedulers_(schedulers), global_context_(opts.global_queue_max_length_), worker_pool_(nullptr), dispatcher_pool_(nullptr) {} @@ -203,7 +204,7 @@ private: std::atomic retrieve_non_local_ns_; }; - IOWorkerContext(const Options& opts, Poco::Logger* logger, GlobalIOWorkerContext& global_ctx, + IOWorkerContext(const Options& opts, LoggerPtr logger, GlobalIOWorkerContext& global_ctx, std::vector>& local_ctx): opts_(opts), logger_(logger), shutdown_(false), global_context_(global_ctx), local_contexts_(local_ctx), request_queue_(opts.worker_queue_max_length_) {} @@ -315,7 +316,7 @@ private: const Options& opts_; - Poco::Logger* logger_; + LoggerPtr logger_; std::atomic shutdown_; @@ -346,7 +347,7 @@ private: std::atomic request_wait_time_ns_; }; - DispatcherContext(const Options& opts, Poco::Logger* logger, SchedulerType* scheduler, + DispatcherContext(const Options& opts, LoggerPtr logger, SchedulerType* scheduler, GlobalIOWorkerContext& global_ctx, std::vector>& local_ctx): opts_(opts), logger_(logger), shutdown_(false), scheduler_(scheduler), global_ctx_(global_ctx), local_ctx_(local_ctx) {} @@ -404,7 +405,7 @@ private: const Options& opts_; - Poco::Logger* logger_; + LoggerPtr logger_; std::atomic shutdown_; @@ -417,7 +418,7 @@ private: const Options opts_; - Poco::Logger* logger_; + LoggerPtr logger_; std::vector schedulers_; diff --git a/src/IO/Scheduler/DynamicParallelIOWorkerPool.h b/src/IO/Scheduler/DynamicParallelIOWorkerPool.h 
index 90ce006997..92cf855a31 100644 --- a/src/IO/Scheduler/DynamicParallelIOWorkerPool.h +++ b/src/IO/Scheduler/DynamicParallelIOWorkerPool.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -76,7 +77,7 @@ public: DynamicParallelIOWorkerPool(const Options& opts, const std::vector& schedulers): - opts_(opts), logger_(&Poco::Logger::get("DynamicParallelIOWorkerPool")), + opts_(opts), logger_(getLogger("DynamicParallelIOWorkerPool")), schedulers_(schedulers), worker_pool_(nullptr), dispatcher_pool_(nullptr) {} virtual void startup() override { @@ -183,7 +184,7 @@ private: }; struct Worker { - Worker(WorkerGroup& worker_group, Poco::Logger* logger, + Worker(WorkerGroup& worker_group, LoggerPtr logger, typename std::list>::iterator worker_iter): worker_group_(worker_group), logger_(logger), opts_(worker_group.opts_), shutdown_(worker_group.shutdown_), status_(worker_group.status_), @@ -283,7 +284,7 @@ private: WorkerGroup& worker_group_; - Poco::Logger* logger_; + LoggerPtr logger_; const Options& opts_; std::atomic& shutdown_; @@ -294,7 +295,7 @@ private: }; struct WorkerGroup { - WorkerGroup(const Options& opts, Poco::Logger* logger, ThreadPool* pool): + WorkerGroup(const Options& opts, LoggerPtr logger, ThreadPool* pool): opts_(opts), shutdown_(false), logger_(logger), requests_queue_(opts.worker_group_queue_length), worker_pool_(pool), worker_num_(0) { for (size_t i = 0; i < opts_.min_thread_per_worker_group_; ++i) { @@ -353,7 +354,7 @@ private: std::atomic shutdown_; - Poco::Logger* logger_; + LoggerPtr logger_; WorkerGroupStatus status_; @@ -375,7 +376,7 @@ private: std::atomic dispatch_time_ns_; }; - Dispatcher(const Options& opts, Poco::Logger* logger, SchedulerType* scheduler, + Dispatcher(const Options& opts, LoggerPtr logger, SchedulerType* scheduler, std::vector>& worker_groups): opts_(opts), logger_(logger), shutdown_(false), scheduler_(scheduler), worker_groups_(worker_groups) {} @@ -424,7 +425,7 @@ private: } const Options& opts_; - 
Poco::Logger* logger_; + LoggerPtr logger_; std::atomic shutdown_; @@ -437,7 +438,7 @@ private: const Options opts_; - Poco::Logger* logger_; + LoggerPtr logger_; std::vector schedulers_; diff --git a/src/IO/Scheduler/FixedParallelIOWorkerPool.h b/src/IO/Scheduler/FixedParallelIOWorkerPool.h index 3eafdcc8b7..325ce1f8ff 100644 --- a/src/IO/Scheduler/FixedParallelIOWorkerPool.h +++ b/src/IO/Scheduler/FixedParallelIOWorkerPool.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -45,7 +46,7 @@ public: FixedParallelIOWorkerPool(const Options& opts, const std::vector& schedulers): - opts_(opts), logger_(&Poco::Logger::get("FixedParallelIOWorkerPool")), + opts_(opts), logger_(getLogger("FixedParallelIOWorkerPool")), schedulers_(schedulers), shutdown_(false), workers_(nullptr) {} virtual void startup() override { @@ -129,7 +130,7 @@ private: Status status_; }; - void workerRountine(WorkerInfo& worker_info, Poco::Logger* logger) { + void workerRountine(WorkerInfo& worker_info, LoggerPtr logger) { LOG_DEBUG(logger, fmt::format("Worker {} started up", worker_info.worker_id_)); SCOPE_EXIT({LOG_DEBUG(logger, fmt::format("Worker {} shutted down", worker_info.worker_id_));}); @@ -205,7 +206,7 @@ private: const Options opts_; - Poco::Logger* logger_; + LoggerPtr logger_; std::vector schedulers_; diff --git a/src/IO/SnappyReadBuffer.h b/src/IO/SnappyReadBuffer.h index 9b34a6326d..6dee95fa8e 100644 --- a/src/IO/SnappyReadBuffer.h +++ b/src/IO/SnappyReadBuffer.h @@ -15,10 +15,10 @@ #pragma once +#include #include #include #include -#include #include @@ -57,8 +57,6 @@ inline UInt32 GetInt(const char* buf) { #pragma GCC diagnostic ignored "-Wsign-compare" #endif -using Poco::Logger; - /* The main decompress logic is copied from Impala implementation */ template class SnappyReadBuffer : public BufferWithOwnMemory @@ -288,7 +286,7 @@ protected: if (readLen < allocUnit) break; own_compressed_buffer.resize(totalLen + allocUnit); } - 
LOG_DEBUG((&Logger::get("SnappyReadBuffer")), "Finish read snappy file."); + LOG_DEBUG((getLogger("SnappyReadBuffer")), "Finish read snappy file."); // set it for BlockDecompress size_compressed = totalLen; // calculate size_decompressed @@ -326,7 +324,7 @@ public: decompress(working_buffer.begin(), size_decompressed); readAndDecompress = true; - LOG_DEBUG((&Logger::get("SnappyReadBuffer")), "Finish decompress snappy file."); + LOG_DEBUG((getLogger("SnappyReadBuffer")), "Finish decompress snappy file."); } ~SnappyReadBuffer() override = default; diff --git a/src/IO/WriteBufferFromByteS3.cpp b/src/IO/WriteBufferFromByteS3.cpp index fc83b24b88..8d7571c23d 100644 --- a/src/IO/WriteBufferFromByteS3.cpp +++ b/src/IO/WriteBufferFromByteS3.cpp @@ -62,7 +62,7 @@ WriteBufferFromByteS3::WriteBufferFromByteS3( , temporary_buffer(nullptr) , last_part_size(0) , total_write_size(0) - , log(&Poco::Logger::get("WriteBufferFromByteS3")) + , log(getLogger("WriteBufferFromByteS3")) { if (max_single_put_threshold > min_segment_size) { diff --git a/src/IO/WriteBufferFromByteS3.h b/src/IO/WriteBufferFromByteS3.h index 9980fd6bfe..e55cbff755 100644 --- a/src/IO/WriteBufferFromByteS3.h +++ b/src/IO/WriteBufferFromByteS3.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -61,7 +62,7 @@ private: String multipart_upload_id; std::vector part_tags; - Poco::Logger * log; + LoggerPtr log; public: // For default settings in 16M segment, max part is about 156G diff --git a/src/IO/WriteBufferFromHTTP.cpp b/src/IO/WriteBufferFromHTTP.cpp index d7edc7eb0b..fafaee7611 100644 --- a/src/IO/WriteBufferFromHTTP.cpp +++ b/src/IO/WriteBufferFromHTTP.cpp @@ -15,7 +15,7 @@ WriteBufferFromHTTP::WriteBufferFromHTTP( request.setHost(uri.getHost()); request.setChunkedTransferEncoding(true); - LOG_TRACE((&Poco::Logger::get("WriteBufferToHTTP")), "Sending request to {}", uri.toString()); + LOG_TRACE((getLogger("WriteBufferToHTTP")), "Sending request to {}", uri.toString()); ostr = 
&session->sendRequest(request); } diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index e6be7a9769..d9bbfecdb7 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -1,6 +1,7 @@ #pragma once #if !defined(ARCADIA_BUILD) +#include #include #endif @@ -49,7 +50,7 @@ private: String multipart_upload_id; std::vector part_tags; - Poco::Logger * log = &Poco::Logger::get("WriteBufferFromS3"); + LoggerPtr log = getLogger("WriteBufferFromS3"); public: explicit WriteBufferFromS3( diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 06b962b4bf..07b13cc4a8 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -1446,7 +1446,7 @@ SetPtr ActionsMatcher::tryMakeSet(const ASTFunction & node, Data & data, bool no if (BitmapIndexHelper::isNarrowArraySetFunctions(node.name)) throw; - LOG_DEBUG(&Poco::Logger::get("ActionsMatcher"), "Cannot make set for bitmap_index_funcs, fallback to normal reader"); + LOG_DEBUG(getLogger("ActionsMatcher"), "Cannot make set for bitmap_index_funcs, fallback to normal reader"); return nullptr; } return return_set; diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 752442edad..34f8a023e5 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -1240,7 +1241,7 @@ private: /// How many RAM were used to process the query before processing the first block. Int64 memory_usage_before_aggregation = 0; - Poco::Logger * log = &Poco::Logger::get("Aggregator"); + LoggerPtr log = getLogger("Aggregator"); /// For external aggregation. 
mutable TemporaryFiles temporary_files; diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index dfea4455d5..f2afad6e6c 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -107,7 +107,7 @@ AsynchronousMetrics::AsynchronousMetrics( int update_period_seconds, std::shared_ptr> servers_to_start_before_tables_, std::shared_ptr> servers_, - Poco::Logger * logger_) + LoggerPtr logger_) : WithContext(global_context_) , update_period(update_period_seconds) , servers_to_start_before_tables(servers_to_start_before_tables_) @@ -769,7 +769,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti Int64 difference = new_amount - amount; /// Log only if difference is high. This is for convenience. The threshold is arbitrary. // if (difference >= 1048576 || difference <= -1048576) - LOG_DEBUG(&Poco::Logger::get("AsynchronousMetrics"), + LOG_DEBUG(getLogger("AsynchronousMetrics"), "MemoryTracking: was {}, peak {}, free memory in arenas {}, hard limit will set to {}, RSS: {}, difference: {}, hualloc cache:{}", ReadableSize(amount), ReadableSize(peak), @@ -782,7 +782,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti Int64 difference = new_amount - amount; /// Log only if difference is high. This is for convenience. The threshold is arbitrary. 
if (difference >= 1048576 || difference <= -1048576) - LOG_DEBUG(&Poco::Logger::get("AsynchronousMetrics"), + LOG_DEBUG(getLogger("AsynchronousMetrics"), "MemoryTracking: was {}, peak {}, will set to {} (RSS), difference: {}", ReadableSize(amount), ReadableSize(peak), diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h index 375a69b4d2..2a16ec4b2c 100644 --- a/src/Interpreters/AsynchronousMetrics.h +++ b/src/Interpreters/AsynchronousMetrics.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -43,7 +44,7 @@ public: int update_period_seconds, std::shared_ptr> servers_to_start_before_tables_, std::shared_ptr> servers_, - Poco::Logger * log_); + LoggerPtr log_); ~AsynchronousMetrics(); @@ -79,7 +80,7 @@ private: /// On first run we will only collect the values to subtract later. bool first_run = true; std::chrono::system_clock::time_point previous_update_time; - Poco::Logger * logger; + LoggerPtr logger; #if defined(OS_LINUX) MemoryStatisticsOS memory_stat; diff --git a/src/Interpreters/BlockBloomFilter.cpp b/src/Interpreters/BlockBloomFilter.cpp index 965d2a81f8..f15faaedce 100644 --- a/src/Interpreters/BlockBloomFilter.cpp +++ b/src/Interpreters/BlockBloomFilter.cpp @@ -295,7 +295,7 @@ void BlockBloomFilter::mergeInplace( BlockBloomFilter && bf) for (size_t i = 0; i < bytes_in_slot * bf.slots; i++) this->data[start + i] |= bf.data[i]; } - // LOG_DEBUG(&Poco::Logger::get("BlockBloomFilter"), "merge... build rf ndv:{}-{}, slot:{}-{}, total:{}, step:{}", + // LOG_DEBUG(getLogger("BlockBloomFilter"), "merge... 
build rf ndv:{}-{}, slot:{}-{}, total:{}, step:{}", // this->ndv, bf.ndv, slots, bf.slots, total, step); return; } diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 3927fb5017..f5f3aa168a 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -35,7 +35,7 @@ FileCache::FileCache( , enable_filesystem_query_cache_limit(cache_settings_.enable_filesystem_query_cache_limit) , enable_bypass_cache_with_threashold(cache_settings_.enable_bypass_cache_with_threashold) , bypass_cache_threashold(cache_settings_.bypass_cache_threashold) - , log(&Poco::Logger::get("FileCache")) + , log(getLogger("FileCache")) , main_priority(std::make_unique()) , stash_priority(std::make_unique()) , max_stash_element_size(cache_settings_.max_elements) diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 28020299fd..e1cbb14ba7 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -145,7 +146,7 @@ private: const size_t bypass_cache_threashold; mutable std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; bool is_initialized = false; std::exception_ptr initialization_exception; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index d8d79c4474..f8a57baf01 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -44,9 +44,9 @@ FileSegment::FileSegment( , file_key(key_) , cache(cache_) #ifndef NDEBUG - , log(&Poco::Logger::get(fmt::format("FileSegment({}) : {}", getHexUIntLowercase(key_), range().toString()))) + , log(getLogger(fmt::format("FileSegment({}) : {}", getHexUIntLowercase(key_), range().toString()))) #else - , log(&Poco::Logger::get("FileSegment")) + , log(getLogger("FileSegment")) #endif , segment_kind(settings.kind) , is_unbound(settings.unbounded) diff --git 
a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index 07716a6972..ef3c4e24f9 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -329,7 +330,7 @@ private: Key file_key; FileCache * cache; - Poco::Logger * log; + LoggerPtr log; /// "detached" file segment means that it is not owned by cache ("detached" from cache). /// In general case, all file segments are owned by cache. diff --git a/src/Interpreters/Cache/LRUFileCachePriority.h b/src/Interpreters/Cache/LRUFileCachePriority.h index 4d1dfda043..a9a913af3c 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.h +++ b/src/Interpreters/Cache/LRUFileCachePriority.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -33,7 +34,7 @@ public: private: LRUQueue queue; - Poco::Logger * log = &Poco::Logger::get("LRUFileCachePriority"); + LoggerPtr log = getLogger("LRUFileCachePriority"); }; class LRUFileCachePriority::LRUFileCacheIterator : public IFileCachePriority::IIterator diff --git a/src/Interpreters/Cache/QueryCache.cpp b/src/Interpreters/Cache/QueryCache.cpp index 8ec94eb33b..4fd64d702c 100644 --- a/src/Interpreters/Cache/QueryCache.cpp +++ b/src/Interpreters/Cache/QueryCache.cpp @@ -195,7 +195,7 @@ QueryCache::Writer::Writer( if (auto entry = cache.getWithKey(key); entry.has_value() && !IsStale()(entry->key) && (entry->key.cnch_txn > source_update_time)) { skip_insert = true; /// Key already contained in cache and did not expire yet --> don't replace it - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); } } @@ -265,14 +265,14 @@ void QueryCache::Writer::finalizeWrite() if (std::chrono::duration_cast(std::chrono::system_clock::now() - query_start_time) < min_query_runtime) { - 
LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (query not expensive enough), query: {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (query not expensive enough), query: {}", key.queryStringFromAst()); return; } if (auto entry = cache.getWithKey(key); entry.has_value() && !IsStale()(entry->key) && (entry->key.cnch_txn > source_update_time)) { /// same check as in ctor because a parallel Writer could have inserted the current key in the meantime - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); return; } @@ -352,7 +352,7 @@ void QueryCache::Writer::finalizeWrite() if ((new_entry_size_in_bytes > max_entry_size_in_bytes) || (new_entry_size_in_rows > max_entry_size_in_rows)) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (query result too big), new_entry_size_in_bytes: {} ({}), new_entry_size_in_rows: {} ({}), query: {}", new_entry_size_in_bytes, max_entry_size_in_bytes, new_entry_size_in_rows, max_entry_size_in_rows, key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (query result too big), new_entry_size_in_bytes: {} ({}), new_entry_size_in_rows: {} ({}), query: {}", new_entry_size_in_bytes, max_entry_size_in_bytes, new_entry_size_in_rows, max_entry_size_in_rows, key.queryStringFromAst()); return; } @@ -387,25 +387,25 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, TxnTimestamp source_ if (!entry.has_value()) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "No entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "No entry found for query {}", key.queryStringFromAst()); return; } if (!entry->key.is_shared && entry->key.user_name != key.user_name) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Inaccessible entry found for query {}", 
key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Inaccessible entry found for query {}", key.queryStringFromAst()); return; } if (entry->key.cnch_txn < source_update_time) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Outdated entry found for query {}, entry txn {}, max table update time {}", key.queryStringFromAst(), entry->key.cnch_txn, source_update_time); + LOG_TRACE(getLogger("QueryCache"), "Outdated entry found for query {}, entry txn {}, max table update time {}", key.queryStringFromAst(), entry->key.cnch_txn, source_update_time); return; } if (IsStale()(entry->key)) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Stale entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Stale entry found for query {}", key.queryStringFromAst()); return; } @@ -443,7 +443,7 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, TxnTimestamp source_ buildSourceFromChunks(entry->key.header, std::move(decompressed_chunks), entry->mapped->totals, entry->mapped->extremes); } - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Entry found for query {}", key.queryStringFromAst()); } bool QueryCache::Reader::hasCacheEntryForKey() const @@ -554,7 +554,7 @@ TxnTimestamp getMaxUpdateTime(const std::set & storage_ids, ContextPt auto host_ports = context->getCnchTopologyMaster()->getTargetServer(UUIDHelpers::UUIDToString(storage_id.uuid), storage_id.server_vw_name, ts, true); if (host_ports.empty()) { - LOG_WARNING(&Poco::Logger::get("getMaxUpdateTime"), "Failed to get target host for {}", storage_id.getNameForLogs()); + LOG_WARNING(getLogger("getMaxUpdateTime"), "Failed to get target host for {}", storage_id.getNameForLogs()); return 0; } std::shared_ptr id = std::make_shared(); @@ -571,7 +571,7 @@ TxnTimestamp getMaxUpdateTime(const std::set & storage_ids, ContextPt if (static_cast(table_infos.size()) != p.second.size()) { - 
LOG_INFO(&Poco::Logger::get("getMaxUpdateTime"), "getTableInfo does return all info: send size {}, receive size {}", p.second.size(), table_infos.size()); + LOG_INFO(getLogger("getMaxUpdateTime"), "getTableInfo does return all info: send size {}, receive size {}", p.second.size(), table_infos.size()); return 0; } @@ -580,7 +580,7 @@ TxnTimestamp getMaxUpdateTime(const std::set & storage_ids, ContextPt UInt64 last_modification_time = table_info.last_modification_time(); if (last_modification_time == 0) { - LOG_INFO(&Poco::Logger::get("getMaxUpdateTime"), "last_modification time for table {}.{} is 0", table_info.database(), table_info.table()); + LOG_INFO(getLogger("getMaxUpdateTime"), "last_modification time for table {}.{} is 0", table_info.database(), table_info.table()); return 0; } @@ -591,13 +591,13 @@ TxnTimestamp getMaxUpdateTime(const std::set & storage_ids, ContextPt } catch (...) { - LOG_INFO(&Poco::Logger::get("getMaxUpdateTime"), "Failed to fetch last modification time, exception {}", getCurrentExceptionMessage(false)); + LOG_INFO(getLogger("getMaxUpdateTime"), "Failed to fetch last modification time, exception {}", getCurrentExceptionMessage(false)); max_last_modification_time = 0; } return max_last_modification_time; } -void logUsedStorageIDs(Poco::Logger * log, const std::set & storage_ids) +void logUsedStorageIDs(LoggerPtr log, const std::set & storage_ids) { LOG_DEBUG(log, "StorageIDs:"); for (auto & storage_id : storage_ids) diff --git a/src/Interpreters/Cache/QueryCache.h b/src/Interpreters/Cache/QueryCache.h index 57382acaa3..d6ca70f117 100644 --- a/src/Interpreters/Cache/QueryCache.h +++ b/src/Interpreters/Cache/QueryCache.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -213,7 +214,7 @@ private: using QueryCachePtr = std::shared_ptr; -void logUsedStorageIDs(Poco::Logger * log, const std::set & storage_ids); +void logUsedStorageIDs(LoggerPtr log, const std::set & storage_ids); struct QueryCacheContext { diff --git 
a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp index a7db4c2806..e4a0db88c6 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp @@ -49,7 +49,7 @@ void WriteBufferToFileSegment::nextImpl() } catch (...) { - LOG_WARNING(&Poco::Logger::get("WriteBufferToFileSegment"), "Failed to write to the underlying buffer ({})", file_segment->getInfoForLog()); + LOG_WARNING(getLogger("WriteBufferToFileSegment"), "Failed to write to the underlying buffer ({})", file_segment->getInfoForLog()); throw; } diff --git a/src/Interpreters/ClusterProxy/IStreamFactory.h b/src/Interpreters/ClusterProxy/IStreamFactory.h index d981995e11..c8b5e8c5db 100644 --- a/src/Interpreters/ClusterProxy/IStreamFactory.h +++ b/src/Interpreters/ClusterProxy/IStreamFactory.h @@ -22,6 +22,7 @@ #pragma once #include +#include #include #include @@ -56,7 +57,7 @@ public: std::vector & res, Pipes & remote_pipes, Pipes & delayed_pipes, - Poco::Logger * log) = 0; + LoggerPtr log) = 0; }; } diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 513650fe1b..17aac784b3 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -232,7 +232,7 @@ void SelectStreamFactory::createForShard( std::vector & plans, Pipes & remote_pipes, Pipes & delayed_pipes, - Poco::Logger * log) + LoggerPtr log) { bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState; bool add_totals = false; @@ -314,7 +314,7 @@ void SelectStreamFactory::createForShard( ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable); if (shard_info.hasRemoteConnections()) { - LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), + LOG_WARNING(getLogger("ClusterProxy::SelectStreamFactory"), "There is no table {} on local replica of shard 
{}, will try remote replicas.", main_table.getNameForLogs(), shard_info.shard_num); emplace_remote_stream(); @@ -352,7 +352,7 @@ void SelectStreamFactory::createForShard( /// If we reached this point, local replica is stale. ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica); - LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard {} is stale (delay: {}s.)", shard_info.shard_num, local_delay); + LOG_WARNING(getLogger("ClusterProxy::SelectStreamFactory"), "Local replica of shard {} is stale (delay: {}s.)", shard_info.shard_num, local_delay); if (!settings.fallback_to_stale_replicas_for_distributed_queries) { @@ -401,7 +401,7 @@ void SelectStreamFactory::createForShard( catch (const Exception & ex) { if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED) - LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), + LOG_WARNING(getLogger("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard {} failed, will use stale local replica", shard_num); else throw; diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h index 405bc49b28..c68267fed9 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.h +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -68,7 +69,7 @@ public: std::vector & plans, Pipes & remote_pipes, Pipes & delayed_pipes, - Poco::Logger * log) override; + LoggerPtr log) override; private: const Block header; diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 44aa411461..1da41e40e8 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -52,7 +52,7 @@ namespace ErrorCodes namespace ClusterProxy { -ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, 
const Settings & settings, Poco::Logger * log) +ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, const Settings & settings, LoggerPtr log) { Settings new_settings = settings; new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.max_execution_time); @@ -193,7 +193,7 @@ ASTPtr rewriteSampleForDistributedTable(const ASTPtr & query_ast, size_t shard_s void executeQuery( QueryPlan & query_plan, - IStreamFactory & stream_factory, Poco::Logger * log, + IStreamFactory & stream_factory, LoggerPtr log, const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info, const ExpressionActionsPtr & sharding_key_expr, const std::string & sharding_key_column_name, @@ -322,7 +322,7 @@ void executeQuery( /// TODO: replace WorkerGroupHandle with SelectQueryInfo if worker group info is put into SelectQueryInfo void executeQuery( QueryPlan & query_plan, - IStreamFactory & stream_factory, Poco::Logger * log, + IStreamFactory & stream_factory, LoggerPtr log, const ASTPtr & query_ast, ContextPtr context, const WorkerGroupHandle & cluster) { assert(log); diff --git a/src/Interpreters/ClusterProxy/executeQuery.h b/src/Interpreters/ClusterProxy/executeQuery.h index f0e0967f39..a21b509277 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.h +++ b/src/Interpreters/ClusterProxy/executeQuery.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -55,7 +56,7 @@ class IStreamFactory; /// - optimize_skip_unused_shards_nesting /// /// @return new Context with adjusted settings -ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, const Settings & settings, Poco::Logger * log = nullptr); +ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, const Settings & settings, LoggerPtr log = nullptr); /// removes different restrictions (like max_concurrent_queries_for_user, max_memory_usage_for_user, etc.) 
/// from settings and creates new context with them @@ -67,7 +68,7 @@ Settings getUserRestrictionsRemoved(const Settings & settings); /// (currently SELECT, DESCRIBE). void executeQuery( QueryPlan & query_plan, - IStreamFactory & stream_factory, Poco::Logger * log, + IStreamFactory & stream_factory, LoggerPtr log, const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info, const ExpressionActionsPtr & sharding_key_expr, const std::string & sharding_key_column_name, @@ -75,7 +76,7 @@ void executeQuery( void executeQuery( QueryPlan & query_plan, - IStreamFactory & stream_factory, Poco::Logger * log, + IStreamFactory & stream_factory, LoggerPtr log, const ASTPtr & query_ast, ContextPtr context, const WorkerGroupHandle & cluster); } diff --git a/src/Interpreters/CnchSystemLog.cpp b/src/Interpreters/CnchSystemLog.cpp index 965b0c32f3..8fd9c1ad01 100644 --- a/src/Interpreters/CnchSystemLog.cpp +++ b/src/Interpreters/CnchSystemLog.cpp @@ -207,7 +207,7 @@ bool prepareDatabaseAndTable( const String & table_name, const Poco::Util::AbstractConfiguration & config, const String & config_prefix, - Poco::Logger * log) + LoggerPtr log) { if (!config.has(config_prefix) || (context->getServerType() != ServerType::cnch_server)) @@ -266,7 +266,7 @@ bool prepareDatabaseAndTable( CnchSystemLogs::CnchSystemLogs(ContextPtr global_context) { - log = &Poco::Logger::get("CnchSystemLogs"); + log = getLogger("CnchSystemLogs"); if (global_context->getServerType() == ServerType::cnch_server) { init_task = global_context->getSchedulePool().createTask( diff --git a/src/Interpreters/CnchSystemLog.h b/src/Interpreters/CnchSystemLog.h index 7795ada44f..f04ff9e092 100644 --- a/src/Interpreters/CnchSystemLog.h +++ b/src/Interpreters/CnchSystemLog.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -116,7 +117,7 @@ private: bool initInWorker(ContextPtr global_context); BackgroundSchedulePool::TaskHolder init_task; - Poco::Logger * log; + LoggerPtr log; 
std::vector logs; }; diff --git a/src/Interpreters/CnchSystemLogHelper.cpp b/src/Interpreters/CnchSystemLogHelper.cpp index 2b7910e9a8..b399bcd018 100644 --- a/src/Interpreters/CnchSystemLogHelper.cpp +++ b/src/Interpreters/CnchSystemLogHelper.cpp @@ -37,7 +37,7 @@ namespace DB bool createDatabaseInCatalog( const ContextPtr & global_context, const String & database_name, - Poco::Logger * log) + LoggerPtr log) { bool ret = true; try @@ -181,7 +181,7 @@ bool createCnchTable( const String & database, const String & table, ASTPtr & create_query_ast, - Poco::Logger * log) + LoggerPtr log) { bool ret = true; ParserCreateQuery parser; @@ -224,7 +224,7 @@ bool prepareCnchTable( const String & database, const String & table, ASTPtr & create_query_ast, - Poco::Logger * log) + LoggerPtr log) { auto catalog = global_context->getCnchCatalog(); @@ -242,7 +242,7 @@ bool syncTableSchema( const String & database, const String & table, const Block & expected_block, - Poco::Logger * log) + LoggerPtr log) { bool ret = true; auto catalog = global_context->getCnchCatalog(); @@ -313,7 +313,7 @@ bool createView( ContextPtr global_context, const String & database, const String & table, - Poco::Logger * log) + LoggerPtr log) { bool ret = true; diff --git a/src/Interpreters/CnchSystemLogHelper.h b/src/Interpreters/CnchSystemLogHelper.h index ffb433ba97..6dce4ff3fc 100644 --- a/src/Interpreters/CnchSystemLogHelper.h +++ b/src/Interpreters/CnchSystemLogHelper.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include namespace DB { @@ -21,7 +22,7 @@ namespace DB bool createDatabaseInCatalog( const ContextPtr & global_context, const String & database_name, - Poco::Logger * logger); + LoggerPtr logger); /// Detects change in table schema. 
Does not support modification of primary/partition keys String makeAlterColumnQuery( @@ -35,26 +36,26 @@ bool createCnchTable( const String & database, const String & table, ASTPtr & create_query_ast, - Poco::Logger * logger); + LoggerPtr logger); bool prepareCnchTable( ContextPtr global_context, const String & database, const String & table, ASTPtr & create_query_ast, - Poco::Logger * logger); + LoggerPtr logger); bool syncTableSchema( ContextPtr global_context, const String & database, const String & table, const Block & expected_block, - Poco::Logger * logger); + LoggerPtr logger); bool createView( ContextPtr global_context, const String & database, const String & table, - Poco::Logger * logger); + LoggerPtr logger); }/// end namespace diff --git a/src/Interpreters/CollectJoinOnKeysVisitor.cpp b/src/Interpreters/CollectJoinOnKeysVisitor.cpp index 5c70a4bc07..9f58debc60 100644 --- a/src/Interpreters/CollectJoinOnKeysVisitor.cpp +++ b/src/Interpreters/CollectJoinOnKeysVisitor.cpp @@ -230,7 +230,7 @@ void CollectJoinOnKeysMatcher::analyzeJoinOnConditions(Data & data, ASTTableJoin for (const auto & item : columns_for_conditions_map) columns_for_join.emplace_back(item.second); - //LOG_DEBUG(&Poco::Logger::get("CollectJoinOnKeysMatcher"), "columns_for_join: {}", columns_for_join.toString()); + //LOG_DEBUG(getLogger("CollectJoinOnKeysMatcher"), "columns_for_join: {}", columns_for_join.toString()); data.analyzed_join.addInequalConditions(data.inequal_conditions, columns_for_join, data.context); } else diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 1dcc5fd9a6..7b108adee7 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -57,7 +57,7 @@ ConcurrentHashJoin::ConcurrentHashJoin( , hash_joins_input_queue(slots) , hash_joins_task_in_queue(slots, false) { - LOG_DEBUG(&Poco::Logger::get("ConcurrentHashJoin"), fmt::format("parallel_join_rows_batch_threshold:{}", 
parallel_join_rows_batch_threshold)); + LOG_DEBUG(getLogger("ConcurrentHashJoin"), fmt::format("parallel_join_rows_batch_threshold:{}", parallel_join_rows_batch_threshold)); /// Non zero `max_joined_block_rows` allows to process block partially and return not processed part. /// TODO: It's not handled properly in ConcurrentHashJoin case, so we set it to 0 to disable this feature. table_join->setMaxJoinedBlockRows(0); @@ -703,7 +703,7 @@ BlockInputStreamPtr ConcurrentHashJoin::createStreamWithNonJoinedRows(const Bloc if (!parents.empty()) { - LOG_TRACE((&Poco::Logger::get("ConcurrentHashJoin")), "create ConcurrentNotJoinedBlockInputStream with total_size:{} index:{} hash_joins:{}", total_size, index, parents.size()); + LOG_TRACE((getLogger("ConcurrentHashJoin")), "create ConcurrentNotJoinedBlockInputStream with total_size:{} index:{} hash_joins:{}", total_size, index, parents.size()); return std::make_shared(std::move(parents), result_sample_block, max_block_size); } else diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 1c53eb5ce6..05c9fcf8a8 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -279,7 +279,7 @@ namespace ErrorCodes */ struct ContextSharedPart { - Poco::Logger * log = &Poco::Logger::get("Context"); + LoggerPtr log = getLogger("Context"); /// For access of most of shared objects. Recursive mutex. 
mutable std::recursive_mutex mutex; @@ -864,7 +864,7 @@ ReadSettings Context::getReadSettings() const if (getIOUringReader().isSupported()) res.local_fs_method = LocalFSReadMethod::io_uring; else - LOG_WARNING(&Poco::Logger::get("Context"), "IOUring is not supported, use default local_fs_method"); + LOG_WARNING(getLogger("Context"), "IOUring is not supported, use default local_fs_method"); } return res; @@ -1264,7 +1264,7 @@ VolumePtr Context::setTemporaryStorage(const String & path, const String & polic return shared->tmp_volume; } -static void setupTmpPath(Poco::Logger * log, const std::string & path) +static void setupTmpPath(LoggerPtr log, const std::string & path) try { LOG_DEBUG(log, "Setting up {} to store temporary data in it", path); @@ -1571,7 +1571,7 @@ void Context::initResourceGroupManager(const ConfigurationPtr & ) // if (!config->has("resource_groups")) // { - // LOG_DEBUG(&Poco::Logger::get("Context"), "No config found. Not creating Resource Group Manager"); + // LOG_DEBUG(getLogger("Context"), "No config found. 
Not creating Resource Group Manager"); // return ; // } // auto resource_group_manager_type = config->getRawString("resource_groups.type", "vw"); @@ -1579,15 +1579,15 @@ void Context::initResourceGroupManager(const ConfigurationPtr & ) // { // if (!getResourceManagerClient()) // { - // LOG_ERROR(&Poco::Logger::get("Context"), "Cannot create VW Resource Group Manager since Resource Manager client is not initialised."); + // LOG_ERROR(getLogger("Context"), "Cannot create VW Resource Group Manager since Resource Manager client is not initialised."); // return; // } - // LOG_DEBUG(&Poco::Logger::get("Context"), "Creating VW Resource Group Manager"); + // LOG_DEBUG(getLogger("Context"), "Creating VW Resource Group Manager"); // shared->resource_group_manager = std::make_shared(getGlobalContext()); // } // else if (resource_group_manager_type == "internal") // { - // LOG_DEBUG(&Poco::Logger::get("Context"), "Creating Internal Resource Group Manager"); + // LOG_DEBUG(getLogger("Context"), "Creating Internal Resource Group Manager"); // shared->resource_group_manager = std::make_shared(); // } // else @@ -4575,7 +4575,7 @@ void Context::setDefaultProfiles(const Poco::Util::AbstractConfiguration & confi shared->system_profile_name = config.getString("system_profile", shared->default_profile_name); setCurrentProfile(shared->system_profile_name); - applySettingsQuirks(settings, &Poco::Logger::get("SettingsQuirks")); + applySettingsQuirks(settings, getLogger("SettingsQuirks")); shared->buffer_profile_name = config.getString("buffer_profile", shared->system_profile_name); buffer_context = Context::createCopy(shared_from_this()); @@ -5049,7 +5049,7 @@ DeleteBitmapCachePtr Context::getDeleteBitmapCache() const void Context::setMetaChecker() { auto meta_checker = [this]() { - Poco::Logger * log = &Poco::Logger::get("MetaChecker"); + LoggerPtr log = getLogger("MetaChecker"); Stopwatch stopwatch; LOG_DEBUG(log, "Start to run metadata synchronization task."); @@ -6014,7 +6014,7 @@ 
void Context::clearOptimizerProfile() optimizer_profile = nullptr; } -void Context::logOptimizerProfile(Poco::Logger * log, String prefix, String name, String time, bool is_rule) +void Context::logOptimizerProfile(LoggerPtr log, String prefix, String name, String time, bool is_rule) { if (settings.log_optimizer_run_time && log) LOG_DEBUG(log, prefix + name + " " + time); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 2689bf3271..021531ceaa 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -1536,7 +1537,7 @@ public: void clearOptimizerProfile(); - void logOptimizerProfile(Poco::Logger * log, String prefix, String name, String time, bool is_rule = false); + void logOptimizerProfile(LoggerPtr log, String prefix, String name, String time, bool is_rule = false); const String & getTenantId() const { diff --git a/src/Interpreters/DAGGraph.h b/src/Interpreters/DAGGraph.h index 8bd5c1de62..b5a54adf37 100644 --- a/src/Interpreters/DAGGraph.h +++ b/src/Interpreters/DAGGraph.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -38,7 +39,7 @@ using StorageUnions = std::vector>; using StorageUnionsPtr = std::shared_ptr; struct SourcePruner { - SourcePruner(PlanSegmentTree * plan_segments_ptr_, Poco::Logger * log_) + SourcePruner(PlanSegmentTree * plan_segments_ptr_, LoggerPtr log_) : plan_segments_ptr(plan_segments_ptr_), log(log_) { } @@ -57,14 +58,14 @@ private: void generateUnprunableSegments(); void generateSegmentStorageMap(); PlanSegmentTree * plan_segments_ptr; - Poco::Logger * log; + LoggerPtr log; }; using SourcePrunerPtr = std::shared_ptr; struct DAGGraph { - DAGGraph() : log(&Poco::Logger::get("DAGGraph")) { async_context = std::make_shared(); } + DAGGraph() : log(getLogger("DAGGraph")) { async_context = std::make_shared(); } void joinAsyncRpcWithThrow(); void joinAsyncRpcPerStage(); void joinAsyncRpcAtLast(); @@ -103,7 +104,7 @@ 
struct DAGGraph butil::IOBuf query_settings_buf; SourcePrunerPtr source_pruner; - Poco::Logger * log; + LoggerPtr log; }; using DAGGraphPtr = std::shared_ptr; @@ -111,13 +112,13 @@ using DAGGraphPtr = std::shared_ptr; class AdaptiveScheduler { public: - explicit AdaptiveScheduler(const ContextPtr & context) : query_context(context), log(&Poco::Logger::get("AdaptiveScheduler")) { } + explicit AdaptiveScheduler(const ContextPtr & context) : query_context(context), log(getLogger("AdaptiveScheduler")) { } std::vector getRandomWorkerRank(); std::vector getHealthyWorkerRank(); private: const ContextPtr query_context; - Poco::Logger * log; + LoggerPtr log; }; } // namespace DB diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index f5859772fc..771c6dab76 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -189,7 +189,7 @@ ContextMutablePtr DDLTaskBase::makeQueryContext(ContextPtr from_context, const Z } -bool DDLTask::findCurrentHostID(ContextPtr global_context, Poco::Logger * log) +bool DDLTask::findCurrentHostID(ContextPtr global_context, LoggerPtr log) { bool host_in_hostlist = false; @@ -221,7 +221,7 @@ bool DDLTask::findCurrentHostID(ContextPtr global_context, Poco::Logger * log) return host_in_hostlist; } -void DDLTask::setClusterInfo(ContextPtr context, Poco::Logger * log) +void DDLTask::setClusterInfo(ContextPtr context, LoggerPtr log) { auto * query_on_cluster = dynamic_cast(query.get()); if (!query_on_cluster) diff --git a/src/Interpreters/DDLTask.h b/src/Interpreters/DDLTask.h index 53a952896d..3c5254ba42 100644 --- a/src/Interpreters/DDLTask.h +++ b/src/Interpreters/DDLTask.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -119,9 +120,9 @@ struct DDLTask : public DDLTaskBase { DDLTask(const String & name, const String & path) : DDLTaskBase(name, path) {} - bool findCurrentHostID(ContextPtr global_context, Poco::Logger * log); + bool findCurrentHostID(ContextPtr global_context, LoggerPtr 
log); - void setClusterInfo(ContextPtr context, Poco::Logger * log); + void setClusterInfo(ContextPtr context, LoggerPtr log); String getShardID() const override; diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 86233342b9..1aad62239e 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -104,7 +104,7 @@ public: zookeeper(zookeeper_), lock_path(fs::path(lock_prefix_) / lock_name_), lock_message(lock_message_), - log(&Poco::Logger::get("zkutil::Lock")) + log(getLogger("zkutil::Lock")) { zookeeper->createIfNotExists(lock_prefix_, ""); } @@ -157,7 +157,7 @@ private: std::string lock_path; std::string lock_message; - Poco::Logger * log; + LoggerPtr log; }; @@ -179,7 +179,7 @@ DDLWorker::DDLWorker( const String & logger_name, const CurrentMetrics::Metric * max_entry_metric_) : context(Context::createCopy(context_)) - , log(&Poco::Logger::get(logger_name)) + , log(getLogger(logger_name)) , pool_size(pool_size_) , max_entry_metric(max_entry_metric_) { diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 0dbd895fb4..8677216a08 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -132,7 +133,7 @@ protected: void runCleanupThread(); ContextMutablePtr context; - Poco::Logger * log; + LoggerPtr log; std::string host_fqdn; /// current host domain name std::string host_fqdn_id; /// host_name:port diff --git a/src/Interpreters/DNSCacheUpdater.cpp b/src/Interpreters/DNSCacheUpdater.cpp index 27d0c3abd9..f746b33050 100644 --- a/src/Interpreters/DNSCacheUpdater.cpp +++ b/src/Interpreters/DNSCacheUpdater.cpp @@ -22,7 +22,7 @@ void DNSCacheUpdater::run() /// Reload cluster config if IP of any host has been changed since last update. if (resolver.updateCache()) { - LOG_INFO(&Poco::Logger::get("DNSCacheUpdater"), "IPs of some hosts have been changed. 
Will reload cluster config."); + LOG_INFO(getLogger("DNSCacheUpdater"), "IPs of some hosts have been changed. Will reload cluster config."); try { getContext()->reloadClusterConfig(); @@ -43,7 +43,7 @@ void DNSCacheUpdater::run() void DNSCacheUpdater::start() { - LOG_INFO(&Poco::Logger::get("DNSCacheUpdater"), "Update period {} seconds", update_period_seconds); + LOG_INFO(getLogger("DNSCacheUpdater"), "Update period {} seconds", update_period_seconds); task_handle->activateAndSchedule(); } diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 137ca5248e..b71f691015 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -726,7 +726,7 @@ std::unique_ptr DatabaseCatalog::database_catalog; DatabaseCatalog::DatabaseCatalog(ContextMutablePtr global_context_) : WithMutableContext(global_context_) , view_dependencies("DatabaseCatalog") - , log(&Poco::Logger::get("DatabaseCatalog")) + , log(getLogger("DatabaseCatalog")) , use_cnch_catalog{global_context_->getServerType() == ServerType::cnch_server} { TemporaryLiveViewCleaner::init(global_context_); diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index bccbece662..8a1efa7198 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -298,7 +299,7 @@ private: UUIDToDatabaseMap db_uuid_map; UUIDToStorageMap uuid_map; - Poco::Logger * log; + LoggerPtr log; /// Do not allow simultaneous execution of DDL requests on the same table. 
/// database name -> database guard -> (table name mutex, counter), diff --git a/src/Interpreters/DistributedStages/ExchangeDataTracker.h b/src/Interpreters/DistributedStages/ExchangeDataTracker.h index a7ba45b7ab..39f38c2160 100644 --- a/src/Interpreters/DistributedStages/ExchangeDataTracker.h +++ b/src/Interpreters/DistributedStages/ExchangeDataTracker.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -135,7 +136,7 @@ private: std::unordered_map exchange_statuses; // Store all exchange ids for query, used to delete exchange statuses for a query. std::unordered_map> query_exchange_ids; - Poco::Logger * log = &Poco::Logger::get("ExchangeStatusTracker"); + LoggerPtr log = getLogger("ExchangeStatusTracker"); }; using ExchangeStatusTrackerPtr = std::shared_ptr; diff --git a/src/Interpreters/DistributedStages/InterpreterDistributedStages.cpp b/src/Interpreters/DistributedStages/InterpreterDistributedStages.cpp index bdde12217d..5b23435810 100644 --- a/src/Interpreters/DistributedStages/InterpreterDistributedStages.cpp +++ b/src/Interpreters/DistributedStages/InterpreterDistributedStages.cpp @@ -48,7 +48,7 @@ namespace ErrorCodes InterpreterDistributedStages::InterpreterDistributedStages(const ASTPtr & query_ptr_, ContextMutablePtr context_) : query_ptr(query_ptr_->clone()) , context(std::move(context_)) - , log(&Poco::Logger::get("InterpreterDistributedStages")) + , log(getLogger("InterpreterDistributedStages")) , plan_segment_tree(std::make_unique()) { initSettings(); @@ -158,7 +158,7 @@ void MockSendPlanSegment(ContextPtr query_context) connection->sendPlanSegment(connection_timeouts, plan_segment.get(), &settings, &query_context->getClientInfo()); connection->poll(1000); Packet packet = connection->receivePacket(); - LOG_TRACE(&Poco::Logger::get("MockSendPlanSegment"), "sendPlanSegmentToLocal finish:" + std::to_string(packet.type)); + LOG_TRACE(getLogger("MockSendPlanSegment"), "sendPlanSegmentToLocal finish:" + std::to_string(packet.type)); 
switch (packet.type) { case Protocol::Server::Exception: diff --git a/src/Interpreters/DistributedStages/InterpreterDistributedStages.h b/src/Interpreters/DistributedStages/InterpreterDistributedStages.h index a51ed0d2c3..dcd6b2b3cb 100644 --- a/src/Interpreters/DistributedStages/InterpreterDistributedStages.h +++ b/src/Interpreters/DistributedStages/InterpreterDistributedStages.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -64,7 +65,7 @@ private: ASTPtr query_ptr; ContextMutablePtr context; - Poco::Logger * log; + LoggerPtr log; PlanSegmentTreePtr plan_segment_tree = nullptr; }; diff --git a/src/Interpreters/DistributedStages/MPPQueryCoordinator.cpp b/src/Interpreters/DistributedStages/MPPQueryCoordinator.cpp index effb964407..f1a611ead8 100644 --- a/src/Interpreters/DistributedStages/MPPQueryCoordinator.cpp +++ b/src/Interpreters/DistributedStages/MPPQueryCoordinator.cpp @@ -178,7 +178,7 @@ MPPQueryCoordinator::MPPQueryCoordinator( , options(std::move(options_)) , plan_segment_tree(std::move(plan_segment_tree_)) , query_id(query_context->getClientInfo().current_query_id) - , log(&Poco::Logger::get("MPPQueryCoordinator")) + , log(getLogger("MPPQueryCoordinator")) , state_machine(std::make_unique(this)) , progress_manager(query_id) { diff --git a/src/Interpreters/DistributedStages/MPPQueryCoordinator.h b/src/Interpreters/DistributedStages/MPPQueryCoordinator.h index 03cd7ac93d..2025f2c1e4 100644 --- a/src/Interpreters/DistributedStages/MPPQueryCoordinator.h +++ b/src/Interpreters/DistributedStages/MPPQueryCoordinator.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -71,7 +72,7 @@ private: MPPQueryOptions options; std::shared_ptr plan_segment_tree; const String & query_id; - Poco::Logger * log; + LoggerPtr log; // All allowed lock order: (state_machine_mutex,status_mutex) or (status_mutex) or (state_machine_mutex) mutable bthread::Mutex state_machine_mutex; diff --git 
a/src/Interpreters/DistributedStages/MPPQueryManager.h b/src/Interpreters/DistributedStages/MPPQueryManager.h index 6ac66f8796..02f41ab216 100644 --- a/src/Interpreters/DistributedStages/MPPQueryManager.h +++ b/src/Interpreters/DistributedStages/MPPQueryManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -43,7 +44,7 @@ private: 8, bthread::Mutex>; MPPCoordinatorMap coordinator_map; - Poco::Logger * log {&Poco::Logger::get("MPPQueryManager")}; + LoggerPtr log {getLogger("MPPQueryManager")}; }; } diff --git a/src/Interpreters/DistributedStages/PlanSegmentExecutor.cpp b/src/Interpreters/DistributedStages/PlanSegmentExecutor.cpp index cd6d81a6c6..86b2079b98 100644 --- a/src/Interpreters/DistributedStages/PlanSegmentExecutor.cpp +++ b/src/Interpreters/DistributedStages/PlanSegmentExecutor.cpp @@ -120,7 +120,7 @@ PlanSegmentExecutor::PlanSegmentExecutor( , plan_segment_instance(std::move(plan_segment_instance_)) , plan_segment(plan_segment_instance->plan_segment.get()) , plan_segment_outputs(plan_segment_instance->plan_segment->getPlanSegmentOutputs()) - , logger(&Poco::Logger::get("PlanSegmentExecutor")) + , logger(getLogger("PlanSegmentExecutor")) , query_log_element(std::make_unique()) { options = ExchangeUtils::getExchangeOptions(context); @@ -138,7 +138,7 @@ PlanSegmentExecutor::PlanSegmentExecutor( , plan_segment(plan_segment_instance->plan_segment.get()) , plan_segment_outputs(plan_segment_instance->plan_segment->getPlanSegmentOutputs()) , options(std::move(options_)) - , logger(&Poco::Logger::get("PlanSegmentExecutor")) + , logger(getLogger("PlanSegmentExecutor")) , query_log_element(std::make_unique()) { prepareSegmentInfo(); @@ -239,7 +239,7 @@ std::optional PlanSegmentExecutor::execute BlockIO PlanSegmentExecutor::lazyExecute(bool /*add_output_processors*/) { - LOG_DEBUG(&Poco::Logger::get("PlanSegmentExecutor"), "lazyExecute: {}", plan_segment->getPlanSegmentId()); + LOG_DEBUG(getLogger("PlanSegmentExecutor"), "lazyExecute: {}", 
plan_segment->getPlanSegmentId()); BlockIO res; // Will run as master query and already initialized if (!CurrentThread::get().getQueryContext() || CurrentThread::get().getQueryContext().get() != context.get()) @@ -487,7 +487,7 @@ void PlanSegmentExecutor::doExecute() } } -static QueryPlanOptimizationSettings buildOptimizationSettingsWithCheck(Poco::Logger * log, ContextMutablePtr& context) +static QueryPlanOptimizationSettings buildOptimizationSettingsWithCheck(LoggerPtr log, ContextMutablePtr& context) { QueryPlanOptimizationSettings settings = QueryPlanOptimizationSettings::fromContext(context); if(!settings.enable_optimizer) @@ -785,7 +785,7 @@ void PlanSegmentExecutor::buildPipeline(QueryPipelinePtr & pipeline, BroadcastSe throw Exception("Plan segment has no exchange sender!", ErrorCodes::LOGICAL_ERROR); } -void PlanSegmentExecutor::registerAllExchangeReceivers(Poco::Logger * log, const QueryPipeline & pipeline, UInt32 register_timeout_ms) +void PlanSegmentExecutor::registerAllExchangeReceivers(LoggerPtr log, const QueryPipeline & pipeline, UInt32 register_timeout_ms) { const Processors & procesors = pipeline.getProcessors(); std::vector async_results; diff --git a/src/Interpreters/DistributedStages/PlanSegmentExecutor.h b/src/Interpreters/DistributedStages/PlanSegmentExecutor.h index 5479d3feaf..733c0979f2 100644 --- a/src/Interpreters/DistributedStages/PlanSegmentExecutor.h +++ b/src/Interpreters/DistributedStages/PlanSegmentExecutor.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -72,7 +73,7 @@ public: std::optional execute(); BlockIO lazyExecute(bool add_output_processors = false); - static void registerAllExchangeReceivers(Poco::Logger * log, const QueryPipeline & pipeline, UInt32 register_timeout_ms); + static void registerAllExchangeReceivers(LoggerPtr log, const QueryPipeline & pipeline, UInt32 register_timeout_ms); protected: void doExecute(); @@ -87,7 +88,7 @@ private: PlanSegment * plan_segment; PlanSegmentOutputs 
plan_segment_outputs; ExchangeOptions options; - Poco::Logger * logger; + LoggerPtr logger; RuntimeSegmentsMetrics metrics; std::unique_ptr query_log_element; SenderMetrics sender_metrics; diff --git a/src/Interpreters/DistributedStages/PlanSegmentManagerRpcService.h b/src/Interpreters/DistributedStages/PlanSegmentManagerRpcService.h index 3cc6a2f256..324fda38ff 100644 --- a/src/Interpreters/DistributedStages/PlanSegmentManagerRpcService.h +++ b/src/Interpreters/DistributedStages/PlanSegmentManagerRpcService.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -38,7 +39,7 @@ class Context; class ResourceMonitorTimer : public RepeatedTimerTask { public: - ResourceMonitorTimer(ContextMutablePtr & global_context_, UInt64 interval_, const std::string& name_, Poco::Logger* log_) : + ResourceMonitorTimer(ContextMutablePtr & global_context_, UInt64 interval_, const std::string& name_, LoggerPtr log_) : RepeatedTimerTask(global_context_->getSchedulePool(), interval_, name_), resource_monitor(global_context_) { log = log_; } @@ -51,7 +52,7 @@ private: ResourceMonitor resource_monitor; WorkerNodeResourceData cached_resource_data; mutable std::mutex resource_data_mutex; - Poco::Logger * log; + LoggerPtr log; }; class PlanSegmentManagerRpcService : public Protos::PlanSegmentManagerService @@ -59,7 +60,7 @@ class PlanSegmentManagerRpcService : public Protos::PlanSegmentManagerService public: explicit PlanSegmentManagerRpcService(ContextMutablePtr context_) : context(context_) - , log(&Poco::Logger::get("PlanSegmentManagerRpcService")) + , log(getLogger("PlanSegmentManagerRpcService")) { report_metrics_timer = std::make_unique(context, 1000, "ResourceMonitorTimer", log); report_metrics_timer->start(); @@ -167,7 +168,7 @@ private: ContextMutablePtr context; std::unique_ptr report_metrics_timer; - Poco::Logger * log; + LoggerPtr log; }; REGISTER_SERVICE_IMPL(PlanSegmentManagerRpcService); diff --git 
a/src/Interpreters/DistributedStages/PlanSegmentProcessList.h b/src/Interpreters/DistributedStages/PlanSegmentProcessList.h index 37f6c2b168..fdd063237f 100644 --- a/src/Interpreters/DistributedStages/PlanSegmentProcessList.h +++ b/src/Interpreters/DistributedStages/PlanSegmentProcessList.h @@ -16,6 +16,7 @@ #pragma once +#include #include #include #include @@ -191,7 +192,7 @@ private: Container initail_query_to_groups; mutable bthread::Mutex mutex; mutable bthread::ConditionVariable remove_group; - Poco::Logger * logger = &Poco::Logger::get("PlanSegmentProcessList"); + LoggerPtr logger = getLogger("PlanSegmentProcessList"); }; } diff --git a/src/Interpreters/DistributedStages/PlanSegmentReport.cpp b/src/Interpreters/DistributedStages/PlanSegmentReport.cpp index 49be149198..6f8655a3c7 100644 --- a/src/Interpreters/DistributedStages/PlanSegmentReport.cpp +++ b/src/Interpreters/DistributedStages/PlanSegmentReport.cpp @@ -14,7 +14,7 @@ namespace ErrorCodes void reportExecutionResult(const PlanSegmentExecutor::ExecutionResult & result) noexcept { - static auto * logger = &Poco::Logger::get("PlanSegmentExecutor"); + static auto logger = getLogger("PlanSegmentExecutor"); try { if (result.segment_profile) @@ -148,7 +148,7 @@ PlanSegmentExecutor::ExecutionResult convertSuccessPlanSegmentStatusToResult( void reportSuccessPlanSegmentProfile(const PlanSegmentExecutor::ExecutionResult & result) { - static auto * logger = &Poco::Logger::get("PlanSegmentExecutor"); + static auto logger = getLogger("PlanSegmentExecutor"); try { std::shared_ptr rpc_client = RpcChannelPool::getInstance().getClient( diff --git a/src/Interpreters/DistributedStages/ProgressManager.cpp b/src/Interpreters/DistributedStages/ProgressManager.cpp index f0ebb1eebb..924ce1d89d 100644 --- a/src/Interpreters/DistributedStages/ProgressManager.cpp +++ b/src/Interpreters/DistributedStages/ProgressManager.cpp @@ -11,7 +11,7 @@ namespace DB { TCPProgressSender::TCPProgressSender(std::function send_tcp_progress_, 
size_t interval_) - : logger(&Poco::Logger::get("ProgressManager")), send_tcp_progress(send_tcp_progress_), interval(interval_) + : logger(getLogger("ProgressManager")), send_tcp_progress(send_tcp_progress_), interval(interval_) { if (send_tcp_progress && interval) { diff --git a/src/Interpreters/DistributedStages/ProgressManager.h b/src/Interpreters/DistributedStages/ProgressManager.h index 9f97bcdcae..9361662623 100644 --- a/src/Interpreters/DistributedStages/ProgressManager.h +++ b/src/Interpreters/DistributedStages/ProgressManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -19,7 +20,7 @@ public: ~TCPProgressSender(); private: - Poco::Logger * logger; + LoggerPtr logger; std::atomic_bool shutdown = {false}; std::mutex mu; std::condition_variable var; @@ -31,7 +32,7 @@ private: class ProgressManager { public: - explicit ProgressManager(const String & query_id_) : log(&Poco::Logger::get("ProgressManager")), query_id(query_id_) + explicit ProgressManager(const String & query_id_) : log(getLogger("ProgressManager")), query_id(query_id_) { } /// normal progress received from sendProgress rpc @@ -46,7 +47,7 @@ public: } private: - Poco::Logger * log; + LoggerPtr log; String query_id; /// only collects progress in worker segments Progress progress; diff --git a/src/Interpreters/DistributedStages/Scheduler.h b/src/Interpreters/DistributedStages/Scheduler.h index 8656170933..7884d35738 100644 --- a/src/Interpreters/DistributedStages/Scheduler.h +++ b/src/Interpreters/DistributedStages/Scheduler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -114,7 +115,7 @@ public: , node_selector(cluster_nodes, query_context, dag_graph_ptr) , local_address(getLocalAddress(*query_context)) , batch_schedule(batch_schedule_) - , log(&Poco::Logger::get("Scheduler")) + , log(getLogger("Scheduler")) { cluster_nodes.all_workers.emplace_back(local_address, NodeType::Local, ""); timespec query_expiration_ts = 
query_context->getQueryExpirationTimeStamp(); @@ -161,7 +162,7 @@ protected: bool batch_schedule = false; BatchPlanSegmentHeaders batch_segment_headers; - Poco::Logger * log; + LoggerPtr log; void genTopology(); virtual void genTasks() = 0; diff --git a/src/Interpreters/DistributedStages/executePlanSegment.cpp b/src/Interpreters/DistributedStages/executePlanSegment.cpp index a141c8c5f1..b215b9683a 100644 --- a/src/Interpreters/DistributedStages/executePlanSegment.cpp +++ b/src/Interpreters/DistributedStages/executePlanSegment.cpp @@ -117,7 +117,7 @@ static void OnSendPlanSegmentCallback( if (cntl->Failed()) { LOG_ERROR( - &Poco::Logger::get("executePlanSegment"), + getLogger("executePlanSegment"), "send plansegment to {} failed, error: {}, msg: {}", butil::endpoint2str(cntl->remote_side()).c_str(), cntl->ErrorText(), @@ -131,7 +131,7 @@ static void OnSendPlanSegmentCallback( else { LOG_TRACE( - &Poco::Logger::get("executePlanSegment"), "send plansegment to {} success", butil::endpoint2str(cntl->remote_side()).c_str()); + getLogger("executePlanSegment"), "send plansegment to {} success", butil::endpoint2str(cntl->remote_side()).c_str()); async_context->asyncComplete(cntl->call_id(), result); } } diff --git a/src/Interpreters/EmbeddedDictionaries.cpp b/src/Interpreters/EmbeddedDictionaries.cpp index 947768ad3d..af30759f4d 100644 --- a/src/Interpreters/EmbeddedDictionaries.cpp +++ b/src/Interpreters/EmbeddedDictionaries.cpp @@ -125,7 +125,7 @@ EmbeddedDictionaries::EmbeddedDictionaries( ContextPtr context_, const bool throw_on_error) : WithContext(context_) - , log(&Poco::Logger::get("EmbeddedDictionaries")) + , log(getLogger("EmbeddedDictionaries")) , geo_dictionaries_loader(std::move(geo_dictionaries_loader_)) , reload_period(getContext()->getConfigRef().getInt("builtin_dictionaries_reload_interval", 3600)) { diff --git a/src/Interpreters/EmbeddedDictionaries.h b/src/Interpreters/EmbeddedDictionaries.h index 674b3a7f01..be2f72fdec 100644 --- 
a/src/Interpreters/EmbeddedDictionaries.h +++ b/src/Interpreters/EmbeddedDictionaries.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -25,7 +26,7 @@ namespace DB class EmbeddedDictionaries : WithContext { private: - Poco::Logger * log; + LoggerPtr log; MultiVersion regions_hierarchies; MultiVersion regions_names; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index a005c56d48..3236446881 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -656,7 +656,7 @@ void ExpressionAnalyzer::initGlobalSubqueriesAndExternalTables(bool do_global) { if (do_global && !getContext()->getSettingsRef().distributed_perfect_shard) { - LOG_DEBUG(&Poco::Logger::get("ExpressionAnalyzer::initGlobalSubqueriesAndExternalTables"), "input query-{}", queryToString(query)); + LOG_DEBUG(getLogger("ExpressionAnalyzer::initGlobalSubqueriesAndExternalTables"), "input query-{}", queryToString(query)); GlobalSubqueriesVisitor::Data subqueries_data( getContext(), subquery_depth, isRemoteStorage(), external_tables, subqueries_for_sets, has_global_subqueries); GlobalSubqueriesVisitor(subqueries_data).visit(query); @@ -1302,7 +1302,7 @@ static std::shared_ptr makeJoin(std::shared_ptr analyzed_join, { if (analyzed_join->allowParallelHashJoin()) { - LOG_TRACE(&Poco::Logger::get("SelectQueryExpressionAnalyzer::makeJoin"), "will use ConcurrentHashJoin"); + LOG_TRACE(getLogger("SelectQueryExpressionAnalyzer::makeJoin"), "will use ConcurrentHashJoin"); return std::make_shared(analyzed_join, context->getSettings().max_threads, context->getSettings().parallel_join_rows_batch_threshold, r_sample_block); } return std::make_shared(analyzed_join, r_sample_block); @@ -1317,10 +1317,10 @@ static std::shared_ptr makeJoin(std::shared_ptr analyzed_join, auto parallel = (context->getSettingsRef().grace_hash_join_left_side_parallel != 0 ? 
context->getSettingsRef().grace_hash_join_left_side_parallel: context->getSettings().max_threads); return std::make_shared(context, analyzed_join, l_sample_block, r_sample_block, context->getTempDataOnDisk(), parallel, context->getSettingsRef().spill_mode == SpillMode::AUTO, false, context->getSettings().max_threads); } else if (allow_merge_join) { // fallback into merge join - LOG_WARNING(&Poco::Logger::get("SelectQueryExpressionAnalyzer::makeJoin"), "Grace hash join is not support, fallback into merge join."); + LOG_WARNING(getLogger("SelectQueryExpressionAnalyzer::makeJoin"), "Grace hash join is not support, fallback into merge join."); return {std::make_shared(analyzed_join, r_sample_block)}; } else { // fallback into hash join when grace hash and merge join not supported - LOG_WARNING(&Poco::Logger::get("SelectQueryExpressionAnalyzer::makeJoin"), "Grace hash join and merge join is not support, fallback into hash join."); + LOG_WARNING(getLogger("SelectQueryExpressionAnalyzer::makeJoin"), "Grace hash join and merge join is not support, fallback into hash join."); return {std::make_shared(analyzed_join, r_sample_block)}; } } @@ -1965,7 +1965,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendProjectResult(ExpressionActio // This is probably not the best way to do it. Should _partition_id even be allowed here? 
if (required_result_columns_not_present.count(column) > 0) { - LOG_DEBUG(&Poco::Logger::get("SelectQueryExpressionAnalyzer::appendProjectResult"), "Column not present: {}", column); + LOG_DEBUG(getLogger("SelectQueryExpressionAnalyzer::appendProjectResult"), "Column not present: {}", column); continue; } result_columns.emplace_back(column, std::string{}); diff --git a/src/Interpreters/ExpressionJIT.cpp b/src/Interpreters/ExpressionJIT.cpp index b8c5bd3537..4d7eef1fd1 100644 --- a/src/Interpreters/ExpressionJIT.cpp +++ b/src/Interpreters/ExpressionJIT.cpp @@ -11,8 +11,9 @@ #include #include #include -#include +#include #include +#include #include #include #include @@ -41,12 +42,6 @@ static CHJIT & getJITInstance() return jit; } -static Poco::Logger * getLogger() -{ - static Poco::Logger & logger = Poco::Logger::get("ExpressionJIT"); - return &logger; -} - class CompiledFunctionHolder : public CompiledExpressionCacheEntry { public: @@ -332,7 +327,7 @@ static FunctionBasePtr compile( { auto [compiled_function_cache_entry, _] = compilation_cache->getOrSet(hash_key, [&] () { - LOG_TRACE(getLogger(), "Compile expression {}", llvm_function->getName()); + LOG_TRACE(getLogger("ExpressionJIT"), "Compile expression {}", llvm_function->getName()); auto compiled_function = compileFunction(getJITInstance(), *llvm_function); return std::make_shared(compiled_function); }); diff --git a/src/Interpreters/ExternalDictionariesLoader.cpp b/src/Interpreters/ExternalDictionariesLoader.cpp index ddd11d460d..dec03fd808 100644 --- a/src/Interpreters/ExternalDictionariesLoader.cpp +++ b/src/Interpreters/ExternalDictionariesLoader.cpp @@ -46,7 +46,7 @@ namespace ErrorCodes /// Must not acquire Context lock in constructor to avoid possibility of deadlocks. 
ExternalDictionariesLoader::ExternalDictionariesLoader(ContextPtr global_context_) - : ExternalLoader("external dictionary", &Poco::Logger::get("ExternalDictionariesLoader")) + : ExternalLoader("external dictionary", getLogger("ExternalDictionariesLoader")) , WithContext(global_context_) { setConfigSettings({"dictionary", "name", "database", "uuid"}); diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index 01418f6fbc..37cfb30c0e 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -101,7 +101,7 @@ namespace class ExternalLoader::LoadablesConfigReader : private boost::noncopyable { public: - LoadablesConfigReader(const String & type_name_, Poco::Logger * log_) + LoadablesConfigReader(const String & type_name_, LoggerPtr log_) : type_name(type_name_), log(log_) { } @@ -376,7 +376,7 @@ private: } const String type_name; - Poco::Logger * log; + LoggerPtr log; std::mutex mutex; ExternalLoaderConfigSettings settings; @@ -399,7 +399,7 @@ public: LoadingDispatcher( const CreateObjectFunction & create_object_function_, const String & type_name_, - Poco::Logger * log_) + LoggerPtr log_) : create_object(create_object_function_) , type_name(type_name_) , log(log_) @@ -1181,7 +1181,7 @@ private: const CreateObjectFunction create_object; const String type_name; - Poco::Logger * log; + LoggerPtr log; mutable std::mutex mutex; std::condition_variable event; @@ -1246,7 +1246,7 @@ private: while (!event.wait_for(lock, std::chrono::seconds(check_period_sec), pred)) { lock.unlock(); - LOG_TRACE(&Poco::Logger::get("PeriodicUpdater"), "do periodic update"); + LOG_TRACE(getLogger("PeriodicUpdater"), "do periodic update"); try { loading_dispatcher.setConfiguration(config_files_reader.read()); @@ -1254,11 +1254,11 @@ private: } catch (const Exception & e) { - LOG_WARNING(&Poco::Logger::get("PeriodicUpdater"), "Failed to run PeriodicUpdater job, error: {}", e.what()); + LOG_WARNING(getLogger("PeriodicUpdater"), 
"Failed to run PeriodicUpdater job, error: {}", e.what()); } catch (...) { - LOG_WARNING(&Poco::Logger::get("PeriodicUpdater"), getCurrentExceptionMessage(false)); + LOG_WARNING(getLogger("PeriodicUpdater"), getCurrentExceptionMessage(false)); } lock.lock(); } @@ -1274,7 +1274,7 @@ private: }; -ExternalLoader::ExternalLoader(const String & type_name_, Poco::Logger * log_) +ExternalLoader::ExternalLoader(const String & type_name_, LoggerPtr log_) : config_files_reader(std::make_unique(type_name_, log_)) , loading_dispatcher(std::make_unique( [this](auto && a, auto && b, auto && c) { return createObject(a, b, c); }, diff --git a/src/Interpreters/ExternalLoader.h b/src/Interpreters/ExternalLoader.h index 2ea79d3aad..2f64573360 100644 --- a/src/Interpreters/ExternalLoader.h +++ b/src/Interpreters/ExternalLoader.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -83,7 +84,7 @@ public: template static constexpr bool is_vector_load_result_type = std::is_same_v || std::is_same_v; - ExternalLoader(const String & type_name_, Poco::Logger * log); + ExternalLoader(const String & type_name_, LoggerPtr log); virtual ~ExternalLoader(); /// Adds a repository which will be used to read configurations from. 
@@ -229,7 +230,7 @@ private: std::unique_ptr periodic_updater; const String type_name; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/ExternalModelsLoader.cpp b/src/Interpreters/ExternalModelsLoader.cpp index 317cf0bf1c..017940910f 100644 --- a/src/Interpreters/ExternalModelsLoader.cpp +++ b/src/Interpreters/ExternalModelsLoader.cpp @@ -11,7 +11,7 @@ namespace ErrorCodes ExternalModelsLoader::ExternalModelsLoader(ContextPtr context_) - : ExternalLoader("external model", &Poco::Logger::get("ExternalModelsLoader")), WithContext(context_) + : ExternalLoader("external model", getLogger("ExternalModelsLoader")), WithContext(context_) { setConfigSettings({"model", "name", {}, {}}); enablePeriodicUpdates(true); diff --git a/src/Interpreters/GinFilter.cpp b/src/Interpreters/GinFilter.cpp index 1293924030..390ba0195e 100644 --- a/src/Interpreters/GinFilter.cpp +++ b/src/Interpreters/GinFilter.cpp @@ -16,8 +16,8 @@ namespace ErrorCodes void printValuesInRoaring(GinIndexPostingsList & post_list) { - LOG_TRACE(&Poco::Logger::get(__func__), "post_list.cardinality: {}", post_list.cardinality()); - LOG_TRACE(&Poco::Logger::get(__func__), "post_list.toString: {}", post_list.toString()); + LOG_TRACE(getLogger(__func__), "post_list.cardinality: {}", post_list.cardinality()); + LOG_TRACE(getLogger(__func__), "post_list.toString: {}", post_list.toString()); } GinFilterParameters::GinFilterParameters(size_t ngrams_, Float64 density_) diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index a59b28d73c..a7193fc544 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -70,7 +70,7 @@ namespace { if (!reader_queue->tryPop(index, UINT_MAX)) { - LOG_INFO(&Poco::Logger::get("GraceHashJoin"), "all file read done"); + LOG_INFO(getLogger("GraceHashJoin"), "all file read done"); return {}; } bool finished = false; @@ -78,11 +78,11 @@ namespace if (finished) { std::unique_lock lock(eof_cnt_mutex); - 
LOG_TRACE(&Poco::Logger::get("GraceHashJoin"), "file " + std::to_string(index) + " read done"); + LOG_TRACE(getLogger("GraceHashJoin"), "file " + std::to_string(index) + " read done"); eof_cnt ++; if (eof_cnt == total_reader) { - LOG_INFO(&Poco::Logger::get("GraceHashJoin"), "close all readers"); + LOG_INFO(getLogger("GraceHashJoin"), "close all readers"); reader_queue->close(); } } @@ -94,7 +94,7 @@ namespace } catch (...) { - tryLogCurrentException(&Poco::Logger::get("GraceHashJoin"), "Fail to read file"); + tryLogCurrentException(getLogger("GraceHashJoin"), "Fail to read file"); reader_queue->close(); throw; } @@ -170,7 +170,7 @@ class GraceHashJoin::FileBucket : boost::noncopyable public: using BucketLock = std::unique_lock; - explicit FileBucket(size_t bucket_index_, std::shared_ptr & left_files_, TemporaryFileStreamShardPtr & right_file_, Poco::Logger * log_, size_t read_result_block_size_, size_t read_block_bytes_) + explicit FileBucket(size_t bucket_index_, std::shared_ptr & left_files_, TemporaryFileStreamShardPtr & right_file_, LoggerPtr log_, size_t read_result_block_size_, size_t read_block_bytes_) : idx{bucket_index_}, left_files{left_files_}, right_file{right_file_}, state{State::WRITING_BLOCKS}, log{log_}, read_result_block_size{read_result_block_size_}, read_block_bytes(read_block_bytes_) { left_side_parallel = left_files_->size(); @@ -291,7 +291,7 @@ private: std::atomic state; std::atomic left_blk_id{0}; - Poco::Logger * log; + LoggerPtr log; size_t read_result_block_size; size_t read_block_bytes; @@ -335,7 +335,7 @@ GraceHashJoin::GraceHashJoin( bool enable_adaptive_spill, bool any_take_last_row_, int num_streams_) - : log{&Poco::Logger::get("GraceHashJoin")} + : log{getLogger("GraceHashJoin")} , context{context_} , adaptive_spill_mode(enable_adaptive_spill) , table_join{std::move(table_join_)} @@ -523,7 +523,7 @@ void GraceHashJoin::addBuckets(const size_t bucket_count) catch (...) 
{ LOG_ERROR( - &Poco::Logger::get("GraceHashJoin"), + getLogger("GraceHashJoin"), "Can't create bucket {} due to error: {}", current_size + i, getCurrentExceptionMessage(false)); diff --git a/src/Interpreters/GraceHashJoin.h b/src/Interpreters/GraceHashJoin.h index d7912d3855..e223feb04b 100644 --- a/src/Interpreters/GraceHashJoin.h +++ b/src/Interpreters/GraceHashJoin.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -131,7 +132,7 @@ private: void initMaxJoinedBlockBytesInSpill(); - Poco::Logger * log; + LoggerPtr log; ContextPtr context; bool adaptive_spill_mode = false; bool join_blk_rows_inited = false; diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index d0dad2ef2a..29fe7c23b6 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -308,7 +308,7 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s , ineuqal_column_name(table_join->getInequalColumnName()) , data(std::make_shared()) , right_sample_block(right_sample_block_) - , log(&Poco::Logger::get("HashJoin")) + , log(getLogger("HashJoin")) { LOG_DEBUG(log, "Right sample block: {}", right_sample_block.dumpStructure()); @@ -2806,7 +2806,7 @@ void HashJoin::validateInequalConditions(const ExpressionActionsPtr & inequal_co expression_sample_block.getByPosition(column_size - 1).name, strictnessToString(strictness), kindToString(kind)); } has_inequal_condition = true; - LOG_DEBUG(&Poco::Logger::get("HashJoin"), "validate inequal condition for header: {}", expression_sample_block.dumpStructure()); + LOG_DEBUG(getLogger("HashJoin"), "validate inequal condition for header: {}", expression_sample_block.dumpStructure()); } } diff --git a/src/Interpreters/HashJoin.h b/src/Interpreters/HashJoin.h index 2a3e5367ef..96c22b7998 100644 --- a/src/Interpreters/HashJoin.h +++ b/src/Interpreters/HashJoin.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -439,7 +440,7 @@ private: /// Left table column names that 
are sources for required_right_keys columns std::vector required_right_keys_sources; - Poco::Logger * log; + LoggerPtr log; Block totals; diff --git a/src/Interpreters/InternalTextLogsQueue.cpp b/src/Interpreters/InternalTextLogsQueue.cpp index 08ccf5f3e3..064a3bdf8c 100644 --- a/src/Interpreters/InternalTextLogsQueue.cpp +++ b/src/Interpreters/InternalTextLogsQueue.cpp @@ -64,7 +64,7 @@ void InternalTextLogsQueue::pushBlock(Block && log_block) if (blocksHaveEqualStructure(sample_block, log_block)) (void)(emplace(log_block.mutateColumns())); else - LOG_WARNING(&Poco::Logger::get("InternalTextLogsQueue"), "Log block have different structure"); + LOG_WARNING(getLogger("InternalTextLogsQueue"), "Log block have different structure"); } const char * InternalTextLogsQueue::getPriorityName(int priority) diff --git a/src/Interpreters/InterpreterAdviseQuery.cpp b/src/Interpreters/InterpreterAdviseQuery.cpp index 7f83228d2c..417d6e0a5b 100644 --- a/src/Interpreters/InterpreterAdviseQuery.cpp +++ b/src/Interpreters/InterpreterAdviseQuery.cpp @@ -51,7 +51,7 @@ BlockIO InterpreterAdviseQuery::execute() auto advises = advisor.analyze(queries, getContext()); auto stop_watch = std::chrono::high_resolution_clock::now(); - LOG_DEBUG(&Poco::Logger::get("InterpreterAdviseQuery"), "Analyze cost: {} ms", + LOG_DEBUG(getLogger("InterpreterAdviseQuery"), "Analyze cost: {} ms", std::chrono::duration_cast(stop_watch - start_watch).count()); start_watch = std::chrono::high_resolution_clock::now(); @@ -71,7 +71,7 @@ BlockIO InterpreterAdviseQuery::execute() stop_watch = std::chrono::high_resolution_clock::now(); - LOG_DEBUG(&Poco::Logger::get("InterpreterAdviseQuery"), "Apply advises cost: {} ms", + LOG_DEBUG(getLogger("InterpreterAdviseQuery"), "Apply advises cost: {} ms", std::chrono::duration_cast(stop_watch - start_watch).count()); if (query_ptr->as().output_ddl) @@ -105,7 +105,7 @@ BlockIO InterpreterAdviseQuery::execute() stop_watch = std::chrono::high_resolution_clock::now(); 
LOG_DEBUG( - &Poco::Logger::get("InterpreterAdviseQuery"), + getLogger("InterpreterAdviseQuery"), "Get optimal DDL cost: {} ms", std::chrono::duration_cast(stop_watch - start_watch).count()); } diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index d3a49d1fdd..f67a9e1ecb 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -106,7 +106,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) if (!cnch_txn) throw Exception("Cnch transaction is not initialized", ErrorCodes::CNCH_TRANSACTION_NOT_INITIALIZED); - LOG_INFO(&Poco::Logger::get("InterpreterAlterQuery"), "Waiting for cnch_lock for " + table_id.database_name + "." + table_id.table_name + "."); + LOG_INFO(getLogger("InterpreterAlterQuery"), "Waiting for cnch_lock for " + table_id.database_name + "." + table_id.table_name + "."); cnch_table_lock = cnch_txn->createIntentLock(IntentLock::TB_LOCK_PREFIX, table->getStorageID().database_name, table->getStorageID().table_name); } else diff --git a/src/Interpreters/InterpreterAlterWarehouseQuery.cpp b/src/Interpreters/InterpreterAlterWarehouseQuery.cpp index 56b7c9d85b..358acccf3b 100644 --- a/src/Interpreters/InterpreterAlterWarehouseQuery.cpp +++ b/src/Interpreters/InterpreterAlterWarehouseQuery.cpp @@ -90,7 +90,7 @@ BlockIO InterpreterAlterWarehouseQuery::execute() if (const ASTAssignment * assignment = child->as()) { const auto & assign_name = assignment->column_name; - LOG_TRACE(&Poco::Logger::get("InterpreterAlterWarehouseQuery"), "assign name {}", assign_name); + LOG_TRACE(getLogger("InterpreterAlterWarehouseQuery"), "assign name {}", assign_name); if (const ASTLiteral * literal = assignment->expression()->as()) { if (assign_name == "rule_name") @@ -197,7 +197,7 @@ BlockIO InterpreterAlterWarehouseQuery::execute() { const auto & assign_name = assignment->column_name; LOG_TRACE( - &Poco::Logger::get("InterpreterAlterWarehouseQuery"), + 
getLogger("InterpreterAlterWarehouseQuery"), "assign name {}, assygn type {}", assign_name, int(assignment->expression()->getType())); diff --git a/src/Interpreters/InterpreterAutoStatsQuery.h b/src/Interpreters/InterpreterAutoStatsQuery.h index 39b80f2140..2ae599c100 100644 --- a/src/Interpreters/InterpreterAutoStatsQuery.h +++ b/src/Interpreters/InterpreterAutoStatsQuery.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -19,7 +20,7 @@ public: private: ASTPtr query_ptr; - Poco::Logger * log = &Poco::Logger::get("InterpreterAutoStats"); + LoggerPtr log = getLogger("InterpreterAutoStats"); }; } diff --git a/src/Interpreters/InterpreterBeginQuery.cpp b/src/Interpreters/InterpreterBeginQuery.cpp index bb84a6894b..a56d6c6e84 100644 --- a/src/Interpreters/InterpreterBeginQuery.cpp +++ b/src/Interpreters/InterpreterBeginQuery.cpp @@ -31,7 +31,7 @@ BlockIO InterpreterBeginQuery::execute() "A transaction has already began: " + session_context->getCurrentTransaction()->getTransactionID().toString(), ErrorCodes::LOGICAL_ERROR); } - LOG_INFO(&Poco::Logger::get("InterpreterBeginQuery"), "Creating new explicit transaction"); + LOG_INFO(getLogger("InterpreterBeginQuery"), "Creating new explicit transaction"); auto & coordinator = session_context->getCnchTransactionCoordinator(); auto txn = coordinator.createTransaction(CreateTransactionOption().setType(CnchTransactionType::Explicit)); if (txn) @@ -44,7 +44,7 @@ BlockIO InterpreterBeginQuery::execute() throw Exception("Failed to create new explicit transaction", ErrorCodes::LOGICAL_ERROR); } - LOG_INFO(&Poco::Logger::get("InterpreterBeginQuery"), "Begin a new explicit transaction: {}", txn->getTransactionID()); + LOG_INFO(getLogger("InterpreterBeginQuery"), "Begin a new explicit transaction: {}", txn->getTransactionID()); return {}; } diff --git a/src/Interpreters/InterpreterCommitQuery.cpp b/src/Interpreters/InterpreterCommitQuery.cpp index a560107edb..60a44c9504 100644 --- 
a/src/Interpreters/InterpreterCommitQuery.cpp +++ b/src/Interpreters/InterpreterCommitQuery.cpp @@ -37,7 +37,7 @@ BlockIO InterpreterCommitQuery::execute() SCOPE_EXIT(session_context->setCurrentTransaction(nullptr);); - auto * log = &Poco::Logger::get("InterpreterCommitQuery"); + auto log = getLogger("InterpreterCommitQuery"); LOG_INFO(log, "Committing explicit transaction: {}", explicit_txn->getTransactionID()); try diff --git a/src/Interpreters/InterpreterCreateBindingQuery.h b/src/Interpreters/InterpreterCreateBindingQuery.h index f89ced02fe..da9f229b20 100644 --- a/src/Interpreters/InterpreterCreateBindingQuery.h +++ b/src/Interpreters/InterpreterCreateBindingQuery.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -15,7 +16,7 @@ public: private: ASTPtr query_ptr; ContextMutablePtr context; - Poco::Logger * log = &Poco::Logger::get("InterpreterCreateBindingQuery"); + LoggerPtr log = getLogger("InterpreterCreateBindingQuery"); }; } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 2ce6b434ba..387e428636 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1237,7 +1237,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) } else if (create.attach && !create.attach_short_syntax && getContext()->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY) { - auto * log = &Poco::Logger::get("InterpreterCreateQuery"); + auto log = getLogger("InterpreterCreateQuery"); LOG_WARNING(log, "ATTACH TABLE query with full table definition is not recommended: " "use either ATTACH TABLE {}; to attach existing table " "or CREATE TABLE {} ; to create new table " diff --git a/src/Interpreters/InterpreterCreateStatsQuery.cpp b/src/Interpreters/InterpreterCreateStatsQuery.cpp index 9aa901ce53..4fc77ea79d 100644 --- a/src/Interpreters/InterpreterCreateStatsQuery.cpp +++ b/src/Interpreters/InterpreterCreateStatsQuery.cpp @@ 
-108,7 +108,7 @@ namespace { auto context = getContext(); Stopwatch watch; - auto * logger = &Poco::Logger::get("CreateStats"); + auto logger = getLogger("CreateStats"); while (counter < collect_targets.size()) { auto collect_target = collect_targets.at(counter++); diff --git a/src/Interpreters/InterpreterDropBindingQuery.h b/src/Interpreters/InterpreterDropBindingQuery.h index c8ff77a2bc..b19c5b72bd 100644 --- a/src/Interpreters/InterpreterDropBindingQuery.h +++ b/src/Interpreters/InterpreterDropBindingQuery.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -15,7 +16,7 @@ public: private: ASTPtr query_ptr; ContextMutablePtr context; - Poco::Logger * log = &Poco::Logger::get("InterpreterDropBindingQuery"); + LoggerPtr log = getLogger("InterpreterDropBindingQuery"); }; } diff --git a/src/Interpreters/InterpreterDumpQuery.h b/src/Interpreters/InterpreterDumpQuery.h index d65e3ec0fe..a293824852 100644 --- a/src/Interpreters/InterpreterDumpQuery.h +++ b/src/Interpreters/InterpreterDumpQuery.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -59,7 +60,7 @@ private: QueryDumper query_dumper; bool enable_ddl = true; bool enable_explain = false; - const Poco::Logger * log = &Poco::Logger::get("InterpreterDumpWorkloadQuery"); + const LoggerPtr log = getLogger("InterpreterDumpWorkloadQuery"); }; } diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index 1068f0e7cd..ee8a4bcf53 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -809,7 +809,7 @@ BlockInputStreamPtr InterpreterExplainQuery::explainMetaData() } catch (...) 
{ - tryLogWarningCurrentException(&Poco::Logger::get("InterpreterExplainQuery::explainMetaData"), "build plan failed."); + tryLogWarningCurrentException(getLogger("InterpreterExplainQuery::explainMetaData"), "build plan failed."); } } diff --git a/src/Interpreters/InterpreterExplainQuery.h b/src/Interpreters/InterpreterExplainQuery.h index 3495b38d27..97aba6c793 100644 --- a/src/Interpreters/InterpreterExplainQuery.h +++ b/src/Interpreters/InterpreterExplainQuery.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -37,7 +38,7 @@ class InterpreterExplainQuery : public IInterpreter, WithMutableContext { public: InterpreterExplainQuery(const ASTPtr & query_, ContextMutablePtr context_) : WithMutableContext(context_), - query(query_), log(&Poco::Logger::get("InterpreterExplainQuery")) {} + query(query_), log(getLogger("InterpreterExplainQuery")) {} BlockIO execute() override; @@ -47,7 +48,7 @@ public: private: ASTPtr query; - Poco::Logger * log; + LoggerPtr log; SelectQueryOptions options; BlockInputStreamPtr executeImpl(); diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index ee7bd829b1..40a008121a 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -128,7 +128,7 @@ static NameSet genViewDependencyCreateQueries(StoragePtr storage, ContextPtr loc auto table = DatabaseCatalog::instance().tryGetTable(dependence, local_context); if (!table) { - LOG_WARNING(&Poco::Logger::get("InterpreterInsertQuery::genViewDependencyCreateQueries"), "table {} not found", dependence.getNameForLogs()); + LOG_WARNING(getLogger("InterpreterInsertQuery::genViewDependencyCreateQueries"), "table {} not found", dependence.getNameForLogs()); continue; } @@ -139,7 +139,7 @@ static NameSet genViewDependencyCreateQueries(StoragePtr storage, ContextPtr loc auto target_table = DatabaseCatalog::instance().tryGetTable(mv->getTargetTableId(), local_context); if 
(!target_table) { - LOG_WARNING(&Poco::Logger::get("InterpreterInsertQuery::genViewDependencyCreateQueries"), "target table for {} not exist", mv->getStorageID().getNameForLogs()); + LOG_WARNING(getLogger("InterpreterInsertQuery::genViewDependencyCreateQueries"), "target table for {} not exist", mv->getStorageID().getNameForLogs()); continue; } @@ -147,7 +147,7 @@ static NameSet genViewDependencyCreateQueries(StoragePtr storage, ContextPtr loc auto * target_cnch_merge = dynamic_cast(target_table.get()); if (!target_cnch_merge) { - LOG_WARNING(&Poco::Logger::get("InterpreterInsertQuery::genViewDependencyCreateQueries"), "table type not matched for {}, CnchMergeTree is expected", + LOG_WARNING(getLogger("InterpreterInsertQuery::genViewDependencyCreateQueries"), "table type not matched for {}, CnchMergeTree is expected", target_table->getStorageID().getNameForLogs()); continue; } @@ -185,7 +185,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query) std::nullopt, Strings{}, query.table_id.database_name); - LOG_TRACE(&Poco::Logger::get(__PRETTY_FUNCTION__), "Worker side create query: {}", create_query); + LOG_TRACE(getLogger(__PRETTY_FUNCTION__), "Worker side create query: {}", create_query); NameSet view_create_sqls = genViewDependencyCreateQueries(storage, getContext()); if (!view_create_sqls.empty()) @@ -387,7 +387,7 @@ BlockIO InterpreterInsertQuery::execute() /// set worker group for select query insert_select_context->initCnchServerResource(insert_select_context->getCurrentTransactionID()); LOG_DEBUG( - &Poco::Logger::get("VirtualWarehouse"), + getLogger("VirtualWarehouse"), "Set worker group {} for table {}", worker_group->getQualifiedName(), cloud_table->getStorageID().getNameForLogs()); } diff --git a/src/Interpreters/InterpreterPerfectShard.h b/src/Interpreters/InterpreterPerfectShard.h index b842ab0b11..0c3173762e 100644 --- a/src/Interpreters/InterpreterPerfectShard.h +++ b/src/Interpreters/InterpreterPerfectShard.h @@ -15,6 +15,7 @@ 
#pragma once +#include #include #include @@ -50,7 +51,7 @@ public: : interpreter(interpreter_) , query(interpreter.query_for_perfect_shard->clone()) , context(interpreter.context) - , log(&Poco::Logger::get("InterpreterPerfectShard")) + , log(getLogger("InterpreterPerfectShard")) { query_info.query = query; processed_stage = determineProcessingStage(); @@ -81,7 +82,7 @@ private: InterpreterSelectQuery & interpreter; ASTPtr query; std::shared_ptr context; - Poco::Logger * log; + LoggerPtr log; SelectQueryInfo query_info; bool perfect_shardable = true; diff --git a/src/Interpreters/InterpreterReproduceQuery.h b/src/Interpreters/InterpreterReproduceQuery.h index 0ab81b12f2..e608712127 100644 --- a/src/Interpreters/InterpreterReproduceQuery.h +++ b/src/Interpreters/InterpreterReproduceQuery.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -21,7 +22,7 @@ public: private: ASTPtr query_ptr; SelectQueryOptions options; - const Poco::Logger * log = &Poco::Logger::get("InterpreterReproduceQuery"); + const LoggerPtr log = getLogger("InterpreterReproduceQuery"); /// database, table, status static BlockIO reproduceDDLImpl(PlanReproducer && reproducer); diff --git a/src/Interpreters/InterpreterRollbackQuery.cpp b/src/Interpreters/InterpreterRollbackQuery.cpp index dd3531be4a..0ddb3e6fca 100644 --- a/src/Interpreters/InterpreterRollbackQuery.cpp +++ b/src/Interpreters/InterpreterRollbackQuery.cpp @@ -39,7 +39,7 @@ BlockIO InterpreterRollbackQuery::execute() } SCOPE_EXIT(session_context->setCurrentTransaction(nullptr);); - auto * log = &Poco::Logger::get("InterpreterRollbackQuery"); + auto log = getLogger("InterpreterRollbackQuery"); LOG_DEBUG(log, "Rollbacking explicit transaction: {}", explicit_txn->getTransactionID()); try diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 8ccad9dd09..c80386bdb2 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ 
b/src/Interpreters/InterpreterSelectQuery.cpp @@ -338,7 +338,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( , storage(storage_) , input(input_) , input_pipe(std::move(input_pipe_)) - , log(&Poco::Logger::get("InterpreterSelectQuery")) + , log(getLogger("InterpreterSelectQuery")) , metadata_snapshot(metadata_snapshot_) { checkStackSize(); @@ -720,7 +720,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (query_info.projection) storage_snapshot->addProjection(query_info.projection->desc); - + LOG_TRACE(log, "query: " + queryToString(query)); std::ostringstream ostr; for (auto & c : required_columns) @@ -2389,7 +2389,6 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc if (!query_plan.isInitialized()) { auto header = storage_snapshot->getSampleBlockForColumns(required_columns); - /// add bitmap index result column for null source if (auto * bitmap_index_info = dynamic_cast(query_analyzer->getIndexContext()->get(MergeTreeIndexInfo::Type::BITMAP).get())) { diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index 87b5302a24..9e17bad874 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -245,7 +246,7 @@ private: BlockInputStreamPtr input; std::optional input_pipe; - Poco::Logger * log; + LoggerPtr log; StorageMetadataPtr metadata_snapshot; bool has_join = false; StorageSnapshotPtr storage_snapshot; diff --git a/src/Interpreters/InterpreterSelectQueryUseOptimizer.cpp b/src/Interpreters/InterpreterSelectQueryUseOptimizer.cpp index cc7a24f6d0..5fa65654c7 100644 --- a/src/Interpreters/InterpreterSelectQueryUseOptimizer.cpp +++ b/src/Interpreters/InterpreterSelectQueryUseOptimizer.cpp @@ -134,7 +134,7 @@ InterpreterSelectQueryUseOptimizer::InterpreterSelectQueryUseOptimizer( , cte_info(std::move(cte_info_)) , context(context_) , options(options_) - , 
log(&Poco::Logger::get("InterpreterSelectQueryUseOptimizer")) + , log(getLogger("InterpreterSelectQueryUseOptimizer")) { interpret_sub_query = !!sub_plan_ptr; } @@ -298,7 +298,7 @@ std::pair> InterpreterSelectQueryUseOpti return std::make_pair(std::move(plan_segment_tree), std::move(used_storage_ids)); } -QueryPipeline executeTEALimit(QueryPipeline & pipeline, ContextMutablePtr context, ASTPtr query_ptr, Poco::Logger * log) +QueryPipeline executeTEALimit(QueryPipeline & pipeline, ContextMutablePtr context, ASTPtr query_ptr, LoggerPtr log) { const ASTSelectWithUnionQuery & ast = query_ptr->as(); diff --git a/src/Interpreters/InterpreterSelectQueryUseOptimizer.h b/src/Interpreters/InterpreterSelectQueryUseOptimizer.h index ab68c8fe07..014ddd50ab 100644 --- a/src/Interpreters/InterpreterSelectQueryUseOptimizer.h +++ b/src/Interpreters/InterpreterSelectQueryUseOptimizer.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -98,7 +99,7 @@ private: CTEInfo cte_info; ContextMutablePtr context; SelectQueryOptions options; - Poco::Logger * log; + LoggerPtr log; bool interpret_sub_query; PlanSegmentTreePtr plan_segment_tree_ptr; diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index da82f19bc4..69fab73adc 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -59,7 +59,7 @@ namespace ErrorCodes InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery( const ASTPtr & query_ptr_, ContextPtr context_, const SelectQueryOptions & options_, const Names & required_result_column_names) - : IInterpreterUnionOrSelectQuery(query_ptr_, context_, options_), log(&Poco::Logger::get("InterpreterSelectWithUnionQuery")) + : IInterpreterUnionOrSelectQuery(query_ptr_, context_, options_), log(getLogger("InterpreterSelectWithUnionQuery")) { ASTSelectWithUnionQuery * ast = query_ptr->as(); bool require_full_header = 
ast->hasNonDefaultUnionMode(); diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.h b/src/Interpreters/InterpreterSelectWithUnionQuery.h index d7f622d6aa..cb36a14dc2 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.h +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -73,7 +74,7 @@ private: std::unique_ptr buildCurrentChildInterpreter(const ASTPtr & ast_ptr_, const Names & current_required_result_column_names); - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp index 0e0e0cb6c5..a240c8dd9b 100644 --- a/src/Interpreters/InterpreterSetQuery.cpp +++ b/src/Interpreters/InterpreterSetQuery.cpp @@ -157,12 +157,12 @@ void InterpreterSetQuery::applyABTestProfile(ContextMutablePtr query_context) } catch (...) { - tryLogWarningCurrentException(&Poco::Logger::get("applyABTestProfile"), "Apply ab test profile failed."); + tryLogWarningCurrentException(getLogger("applyABTestProfile"), "Apply ab test profile failed."); } } else { - LOG_WARNING(&Poco::Logger::get("applyABTestProfile"), "Apply ab test profile failed, ab_test_traffic_factor must be between 0 and 1, ab_test_profile != default"); + LOG_WARNING(getLogger("applyABTestProfile"), "Apply ab test profile failed, ab_test_traffic_factor must be between 0 and 1, ab_test_profile != default"); } } } diff --git a/src/Interpreters/InterpreterShowBindingsQuery.cpp b/src/Interpreters/InterpreterShowBindingsQuery.cpp index 90faf2ca52..9244f2e90b 100644 --- a/src/Interpreters/InterpreterShowBindingsQuery.cpp +++ b/src/Interpreters/InterpreterShowBindingsQuery.cpp @@ -80,7 +80,7 @@ BlockIO InterpreterShowBindingsQuery::execute() } catch (...) 
{ - LOG_WARNING(&Poco::Logger::get("SQL Binding"), "Update Global Bindings Failed"); + LOG_WARNING(getLogger("SQL Binding"), "Update Global Bindings Failed"); } } diff --git a/src/Interpreters/InterpreterShowStatsQuery.cpp b/src/Interpreters/InterpreterShowStatsQuery.cpp index 4995eafdc6..160edecb7b 100644 --- a/src/Interpreters/InterpreterShowStatsQuery.cpp +++ b/src/Interpreters/InterpreterShowStatsQuery.cpp @@ -198,7 +198,7 @@ void readDbStats(ContextPtr context, const String & original_db_name, const Stri auto db_name = original_db_name; auto catalog = createCatalogAdaptor(context); - auto logger = &Poco::Logger::get("load stats"); + auto logger = getLogger("load stats"); auto load_ts = AutoStats::convertToDateTime64(AutoStats::nowTimePoint()); for (auto & table_pb : db_stats.tables()) @@ -208,7 +208,7 @@ void readDbStats(ContextPtr context, const String & original_db_name, const Stri if (!table_id_opt) { auto msg = "table " + table_name + " not exist in database " + db_name; - logger->warning(msg); + LOG_WARNING(logger, msg); continue; } diff --git a/src/Interpreters/InterpreterShowTablesQuery.cpp b/src/Interpreters/InterpreterShowTablesQuery.cpp index dc7c87dfb1..bfe1ec5064 100644 --- a/src/Interpreters/InterpreterShowTablesQuery.cpp +++ b/src/Interpreters/InterpreterShowTablesQuery.cpp @@ -287,7 +287,7 @@ static String rewriteShowCatalogForExternal(const ASTShowTablesQuery & query, co if (query.limit_length) rewritten_query << " LIMIT " << query.limit_length; - LOG_TRACE(&Poco::Logger::get("getRewrittenQueryForExternalCatalogImpl"), rewritten_query.str()); + LOG_TRACE(getLogger("getRewrittenQueryForExternalCatalogImpl"), rewritten_query.str()); return rewritten_query.str(); } diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 40ccf95844..99349a621d 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -261,7 +261,7 @@ void 
InterpreterSystemQuery::startStopAction(StorageActionBlockType action_type, InterpreterSystemQuery::InterpreterSystemQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_) - : WithMutableContext(context_), query_ptr(query_ptr_->clone()), log(&Poco::Logger::get("InterpreterSystemQuery")) + : WithMutableContext(context_), query_ptr(query_ptr_->clone()), log(getLogger("InterpreterSystemQuery")) { } @@ -1657,7 +1657,7 @@ namespace { template -void executeActionOnCNCHLogImpl(std::shared_ptr cnch_log, ASTSystemQuery::Type type, const String & table_name , Poco::Logger * log) +void executeActionOnCNCHLogImpl(std::shared_ptr cnch_log, ASTSystemQuery::Type type, const String & table_name , LoggerPtr log) { using Type = ASTSystemQuery::Type; if (cnch_log) diff --git a/src/Interpreters/InterpreterSystemQuery.h b/src/Interpreters/InterpreterSystemQuery.h index f02e3733bb..cf8a1c962e 100644 --- a/src/Interpreters/InterpreterSystemQuery.h +++ b/src/Interpreters/InterpreterSystemQuery.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -60,7 +61,7 @@ public: private: ASTPtr query_ptr; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; StorageID table_id = StorageID::createEmpty(); /// Will be set up if query contains table name VolumePtr volume_ptr; diff --git a/src/Interpreters/InterpreterUpdateQuery.cpp b/src/Interpreters/InterpreterUpdateQuery.cpp index 95889d212c..ea03a1ace7 100644 --- a/src/Interpreters/InterpreterUpdateQuery.cpp +++ b/src/Interpreters/InterpreterUpdateQuery.cpp @@ -74,7 +74,7 @@ getTableBlockIO(const StoragePtr & storage, ContextMutablePtr query_context) InterpreterUpdateQuery::InterpreterUpdateQuery(const ASTPtr & query_ptr_, ContextPtr context_) : WithContext(context_), query_ptr(query_ptr_), - log(&Poco::Logger::get("InterpreterUpdateQuery")) + log(getLogger("InterpreterUpdateQuery")) { } diff --git a/src/Interpreters/InterpreterUpdateQuery.h b/src/Interpreters/InterpreterUpdateQuery.h index 7b700c18fe..a6b8ec8522 
100644 --- a/src/Interpreters/InterpreterUpdateQuery.h +++ b/src/Interpreters/InterpreterUpdateQuery.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -25,7 +26,7 @@ private: BlockIO executePartialUpdate(const StoragePtr & storage); ASTPtr prepareInsertQueryForPartialUpdate(const StoragePtr & storage, const std::unordered_map & name_to_expression_map); ASTPtr query_ptr; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/InterserverCredentials.cpp b/src/Interpreters/InterserverCredentials.cpp index e60d397eb0..a1da6b1323 100644 --- a/src/Interpreters/InterserverCredentials.cpp +++ b/src/Interpreters/InterserverCredentials.cpp @@ -34,7 +34,7 @@ InterserverCredentials::CurrentCredentials InterserverCredentials::parseCredenti const Poco::Util::AbstractConfiguration & config, const std::string & root_tag) { - auto * log = &Poco::Logger::get("InterserverCredentials"); + auto log = getLogger("InterserverCredentials"); CurrentCredentials store; store.emplace_back(current_user_, current_password_); if (config.getBool(root_tag + ".allow_empty", false)) diff --git a/src/Interpreters/JIT/CHJIT.cpp b/src/Interpreters/JIT/CHJIT.cpp index 69ab631381..71373193e0 100644 --- a/src/Interpreters/JIT/CHJIT.cpp +++ b/src/Interpreters/JIT/CHJIT.cpp @@ -425,7 +425,7 @@ CHJIT::CompiledModule CHJIT::compileModule(std::function std::string tmp; llvm::raw_string_ostream os(tmp); module->print(os, nullptr); - LOG_TRACE(&Poco::Logger::get("CompiledModule"), "module: ===\n{}===\n", os.str()); + LOG_TRACE(getLogger("CompiledModule"), "module: ===\n{}===\n", os.str()); auto module_info = compileModule(std::move(module)); ++current_module_key; diff --git a/src/Interpreters/MySQL/InterpretersAnalyticalMySQLDDLQuery.h b/src/Interpreters/MySQL/InterpretersAnalyticalMySQLDDLQuery.h index 63132449ed..7bd12d3b95 100644 --- a/src/Interpreters/MySQL/InterpretersAnalyticalMySQLDDLQuery.h +++ b/src/Interpreters/MySQL/InterpretersAnalyticalMySQLDDLQuery.h @@ -1,5 +1,6 @@ 
#pragma once +#include #include #include #include @@ -43,7 +44,7 @@ class InterpreterAnalyticalMySQLDDLQuery : public IInterpreter, WithMutableConte public: InterpreterAnalyticalMySQLDDLQuery( const ASTPtr & query_ptr_, ContextMutablePtr context_) - : WithMutableContext(context_), query_ptr(query_ptr_), log(&Poco::Logger::get("InterpreterAnalyticalMySQLDDLQuery")) + : WithMutableContext(context_), query_ptr(query_ptr_), log(getLogger("InterpreterAnalyticalMySQLDDLQuery")) { } @@ -68,7 +69,7 @@ public: private: ASTPtr query_ptr; - Poco::Logger * log; + LoggerPtr log; }; using InterpreterAnalyticalMySQLAlterQuery = InterpreterAnalyticalMySQLDDLQuery; diff --git a/src/Interpreters/NamedSession.cpp b/src/Interpreters/NamedSession.cpp index 3178dbca84..584cf0b9b0 100644 --- a/src/Interpreters/NamedSession.cpp +++ b/src/Interpreters/NamedSession.cpp @@ -136,7 +136,7 @@ std::chrono::steady_clock::duration NamedSessionsImpl::closeSessio { /// Schedule closeSessions() every 1 second by default. static constexpr std::chrono::steady_clock::duration close_interval = std::chrono::seconds(1); - static auto * log = &Poco::Logger::get("NamedSession"); + auto log = getLogger("NamedSession"); if (close_times.empty()) return close_interval; @@ -192,11 +192,10 @@ NamedCnchSession::NamedCnchSession(NamedSessionKey key_, ContextPtr context_, si void NamedCnchSession::release() { - static auto * log = &Poco::Logger::get("NamedSession"); timeout = 0; /// schedule immediately close_time = 0; parent.releaseSession(*this); - LOG_DEBUG(log, "Release CnchWorkerResource {}", key); + LOG_DEBUG(getLogger("NamedCnchSession"), "Release CnchWorkerResource {}", key); } template class NamedSessionsImpl; diff --git a/src/Interpreters/NestedLoopJoin.h b/src/Interpreters/NestedLoopJoin.h index e872e1200b..5b6d8f1cbd 100644 --- a/src/Interpreters/NestedLoopJoin.h +++ b/src/Interpreters/NestedLoopJoin.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -93,7 +94,7 @@ public: size_t 
getTotalByteCount() const override; private: - Poco::Logger * log = &Poco::Logger::get("NestedLoopJoin"); + LoggerPtr log = getLogger("NestedLoopJoin"); using ExpressionActionsPtr = std::shared_ptr; std::shared_ptr table_join; diff --git a/src/Interpreters/NodeSelector.h b/src/Interpreters/NodeSelector.h index dd861a9184..2ed7adf9fd 100644 --- a/src/Interpreters/NodeSelector.h +++ b/src/Interpreters/NodeSelector.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -125,7 +126,7 @@ template class CommonNodeSelector { public: - CommonNodeSelector(const ClusterNodes & cluster_nodes_, Poco::Logger * log_) : cluster_nodes(cluster_nodes_), log(log_) { } + CommonNodeSelector(const ClusterNodes & cluster_nodes_, LoggerPtr log_) : cluster_nodes(cluster_nodes_), log(log_) { } void checkClusterInfo(PlanSegment * plan_segment_ptr) { @@ -172,20 +173,20 @@ public: protected: const ClusterNodes & cluster_nodes; - Poco::Logger * log; + LoggerPtr log; }; class LocalNodeSelector : public CommonNodeSelector { public: - LocalNodeSelector(const ClusterNodes & cluster_nodes_, Poco::Logger * log_) : CommonNodeSelector(cluster_nodes_, log_) { } + LocalNodeSelector(const ClusterNodes & cluster_nodes_, LoggerPtr log_) : CommonNodeSelector(cluster_nodes_, log_) { } NodeSelectorResult select(PlanSegment * plan_segment_ptr, ContextPtr query_context); }; class LocalityNodeSelector : public CommonNodeSelector { public: - LocalityNodeSelector(const ClusterNodes & cluster_nodes_, Poco::Logger * log_) : CommonNodeSelector(cluster_nodes_, log_) { } + LocalityNodeSelector(const ClusterNodes & cluster_nodes_, LoggerPtr log_) : CommonNodeSelector(cluster_nodes_, log_) { } NodeSelectorResult select(PlanSegment * plan_segment_ptr, ContextPtr query_context, DAGGraph * dag_graph_ptr); }; @@ -193,14 +194,14 @@ class SourceNodeSelector : public CommonNodeSelector { public: using Map = std::unordered_map>; - SourceNodeSelector(const ClusterNodes & cluster_nodes_, Poco::Logger * log_) : 
CommonNodeSelector(cluster_nodes_, log_) { } + SourceNodeSelector(const ClusterNodes & cluster_nodes_, LoggerPtr log_) : CommonNodeSelector(cluster_nodes_, log_) { } NodeSelectorResult select(PlanSegment * plan_segment_ptr, ContextPtr query_context, DAGGraph * dag_graph_ptr); }; class ComputeNodeSelector : public CommonNodeSelector { public: - ComputeNodeSelector(const ClusterNodes & cluster_nodes_, Poco::Logger * log_) : CommonNodeSelector(cluster_nodes_, log_) { } + ComputeNodeSelector(const ClusterNodes & cluster_nodes_, LoggerPtr log_) : CommonNodeSelector(cluster_nodes_, log_) { } NodeSelectorResult select(PlanSegment * plan_segment_ptr, ContextPtr query_context, DAGGraph * dag_graph_ptr); }; @@ -213,7 +214,7 @@ public: : query_context(query_context_) , dag_graph_ptr(dag_graph_ptr_.get()) , cluster_nodes(cluster_nodes_) - , log(&Poco::Logger::get("NodeSelector")) + , log(getLogger("NodeSelector")) , local_node_selector(cluster_nodes, log) , source_node_selector(cluster_nodes, log) , compute_node_selector(cluster_nodes, log) @@ -240,7 +241,7 @@ private: ContextPtr query_context; DAGGraph * dag_graph_ptr; const ClusterNodes & cluster_nodes; - Poco::Logger * log; + LoggerPtr log; LocalNodeSelector local_node_selector; SourceNodeSelector source_node_selector; ComputeNodeSelector compute_node_selector; diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index dfe58cb2f2..2927d69974 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -207,7 +207,7 @@ bool PartLog::addNewParts( } catch (...) { - tryLogCurrentException(part_log ? part_log->log : &Poco::Logger::get("PartLog"), __PRETTY_FUNCTION__); + tryLogCurrentException(part_log ? 
part_log->log : getRawLogger("PartLog"), __PRETTY_FUNCTION__); return false; } diff --git a/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.cpp b/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.cpp index 2409e87cf7..13095ba903 100644 --- a/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.cpp +++ b/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.cpp @@ -51,7 +51,7 @@ namespace } PreparedStatementLoaderFromDisk::PreparedStatementLoaderFromDisk(const String & dir_path_) - : dir_path{makeDirectoryPathCanonical(dir_path_)}, log{&Poco::Logger::get("PreparedStatementLoaderFromDisk")} + : dir_path{makeDirectoryPathCanonical(dir_path_)}, log{getLogger("PreparedStatementLoaderFromDisk")} { createDirectory(); } diff --git a/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.h b/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.h index 30369c44a0..bcb77f47a6 100644 --- a/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.h +++ b/src/Interpreters/PreparedStatement/PreparedStatementLoaderFromDisk.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -36,7 +37,7 @@ private: String dir_path; mutable std::shared_mutex mutex; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/PreparedStatement/PreparedStatementManager.cpp b/src/Interpreters/PreparedStatement/PreparedStatementManager.cpp index 31c4ecc365..43ad89e5f4 100644 --- a/src/Interpreters/PreparedStatement/PreparedStatementManager.cpp +++ b/src/Interpreters/PreparedStatement/PreparedStatementManager.cpp @@ -243,7 +243,7 @@ void PreparedStatementManager::loadStatementsFromCatalog(ContextMutablePtr & con catch (...) 
{ tryLogWarningCurrentException( - &Poco::Logger::get("PreparedStatementManager"), + getLogger("PreparedStatementManager"), fmt::format("while build prepared statement {} plan", backQuote(statement->name))); continue; } diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index e567160154..9545504cd0 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -253,7 +253,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as if (!is_unlimited_query && max_size && processes_size >= max_size) { if (queue_max_wait_ms) - LOG_WARNING(&Poco::Logger::get("ProcessList"), "Too many simultaneous queries, will wait {} ms.", queue_max_wait_ms); + LOG_WARNING(getLogger("ProcessList"), "Too many simultaneous queries, will wait {} ms.", queue_max_wait_ms); if (!queue_max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(queue_max_wait_ms), [&]{ return processes.size() < max_size; })) throw Exception("Too many simultaneous queries. 
Maximum: " + toString(max_size), ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES); } @@ -307,7 +307,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as auto query_it = user_process_list.queries.emplace(client_info.current_query_id, query_status); if (!query_it.second) { - LOG_ERROR(&Poco::Logger::get("ProcessList"), "Logical error: cannot insert Querystatus into user_process_list"); + LOG_ERROR(getLogger("ProcessList"), "Logical error: cannot insert Querystatus into user_process_list"); } lock.unlock(); @@ -455,13 +455,13 @@ ProcessListEntry::~ProcessListEntry() auto user_process_list_it = parent.user_to_queries.find(user); if (user_process_list_it == parent.user_to_queries.end()) { - LOG_ERROR(&Poco::Logger::get("ProcessList"), "Logical error: cannot find user in ProcessList"); + LOG_ERROR(getLogger("ProcessList"), "Logical error: cannot find user in ProcessList"); } ProcessListForUser & user_process_list = user_process_list_it->second; if (!user_process_list.queries.erase(query_id)) { - LOG_ERROR(&Poco::Logger::get("ProcessList"), "Logical error: cannot find query by query_id and pointer to ProcessListElement in ProcessListForUser"); + LOG_ERROR(getLogger("ProcessList"), "Logical error: cannot find query by query_id and pointer to ProcessListElement in ProcessListForUser"); } parent.have_space.notify_all(); @@ -629,7 +629,7 @@ bool QueryStatus::checkCpuTimeLimit(String node_name) double total_query_cpu_seconds = total_query_cpu_micros * 1.0 / 1000000; double thread_cpu_seconds = thread_cpu_micros * 1.0 / 1000000; - LOG_TRACE(&Poco::Logger::get("ThreadStatus"), "node {} checkCpuTimeLimit thread cpu secs = {}, total cpu secs = {}, max = {}", + LOG_TRACE(getLogger("ThreadStatus"), "node {} checkCpuTimeLimit thread cpu secs = {}, total cpu secs = {}, max = {}", node_name, thread_cpu_seconds, total_query_cpu_seconds, settings.max_query_cpu_seconds); if (total_query_cpu_micros > settings.max_query_cpu_seconds * 1000000) { diff --git 
a/src/Interpreters/PushFilterToStorage.h b/src/Interpreters/PushFilterToStorage.h index a9ee8aa6c2..1a59b00f15 100644 --- a/src/Interpreters/PushFilterToStorage.h +++ b/src/Interpreters/PushFilterToStorage.h @@ -1,5 +1,6 @@ #pragma once +#include #include "common/logger_useful.h" #include "Interpreters/Context_fwd.h" #include "Parsers/IAST_fwd.h" @@ -20,6 +21,6 @@ public: private: ConstStoragePtr storage; - const Poco::Logger * logger = &Poco::Logger::get("PushFilterToStorage"); + const LoggerPtr logger = getLogger("PushFilterToStorage"); }; } diff --git a/src/Interpreters/QueueManager.cpp b/src/Interpreters/QueueManager.cpp index ca56b7c4c0..a74ea7c121 100644 --- a/src/Interpreters/QueueManager.cpp +++ b/src/Interpreters/QueueManager.cpp @@ -65,7 +65,7 @@ bool ResourceQeueueThrottler::isThrottling(QueueInfo * queue_info) if (worker_status == nullptr) { LOG_DEBUG( - &Poco::Logger::get("QueueManager"), + getLogger("QueueManager"), "{} ResourceQeueueThrottler {}.{} is nullptr", queue_info->query_id, queue_info->vw_name, @@ -74,7 +74,7 @@ bool ResourceQeueueThrottler::isThrottling(QueueInfo * queue_info) } if (worker_status->getWorkerGroupHealth() == WorkerGroupHealthStatus::Critical) { - LOG_TRACE(&Poco::Logger::get("QueueManager"), "{} ResourceQeueueThrottler throttle", queue_info->query_id); + LOG_TRACE(getLogger("QueueManager"), "{} ResourceQeueueThrottler throttle", queue_info->query_id); return true; } return false; @@ -83,7 +83,7 @@ bool ResourceQeueueThrottler::isThrottling(QueueInfo * queue_info) void VWConcurrencyQeueueThrottler::release(const String & vw) { std::unique_lock lk(mutex); - LOG_TRACE(&Poco::Logger::get("QueueManager"), "VWConcurrencyQeueueThrottler minus {} parallel size", vw); + LOG_TRACE(getLogger("QueueManager"), "VWConcurrencyQeueueThrottler minus {} parallel size", vw); vw_parallel_map[vw]--; } @@ -97,12 +97,12 @@ bool VWConcurrencyQeueueThrottler::isThrottling(QueueInfo * queue_info) std::unique_lock lk(mutex); if 
(vw_parallel_map[queue_info->vw] >= queue_manager->getVWParallelizeSize()) { - LOG_TRACE(&Poco::Logger::get("QueueManager"), "VWConcurrencyQeueueThrottler throttle"); + LOG_TRACE(getLogger("QueueManager"), "VWConcurrencyQeueueThrottler throttle"); return true; } LOG_TRACE( - &Poco::Logger::get("QueueManager"), "{} VWConcurrencyQeueueThrottler add {} parallel size", queue_info->query_id, queue_info->vw); + getLogger("QueueManager"), "{} VWConcurrencyQeueueThrottler add {} parallel size", queue_info->query_id, queue_info->vw); vw_parallel_map[queue_info->vw]++; queue_info->context->setQueueDeleter(getDeleter(getWorkerGroupName(queue_info->vw_name, queue_info->wg_name))); return false; @@ -148,7 +148,7 @@ void QueueManager::cancel(const String & query_id) } } -QueueManager::QueueManager(ContextWeakMutablePtr context_) : WithContext(context_), log(&Poco::Logger::get("QueueManager")) +QueueManager::QueueManager(ContextWeakMutablePtr context_) : WithContext(context_), log(getLogger("QueueManager")) { LOG_DEBUG(log, "Start QueueManager"); schedule_pool.emplace(1, CurrentMetrics::BackgroundQueueManagerSchedulePoolTask, "QueuePool"); diff --git a/src/Interpreters/QueueManager.h b/src/Interpreters/QueueManager.h index bb73db31be..3e46db1c7e 100644 --- a/src/Interpreters/QueueManager.h +++ b/src/Interpreters/QueueManager.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -161,7 +162,7 @@ private: mutable std::optional schedule_pool; std::unique_ptr queue_manager_trigger_task; QeueueThrottlersPtr throttlers; - Poco::Logger * log; + LoggerPtr log; VWConcurrencyQeueueThrottlerPtr vw_concurrency_throttler; }; diff --git a/src/Interpreters/RuntimeFilter/ConcurrentHashMap.h b/src/Interpreters/RuntimeFilter/ConcurrentHashMap.h index 5407730590..11c2bc903e 100644 --- a/src/Interpreters/RuntimeFilter/ConcurrentHashMap.h +++ b/src/Interpreters/RuntimeFilter/ConcurrentHashMap.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -235,7 +236,7 @@ 
class ConcurrentHashMap { public: explicit ConcurrentHashMap(size_t num_shard = 128, Hash const & hash_function_ = Hash()) - : hash_function(hash_function_), shards(num_shard), log(&Poco::Logger::get("ConcurrentShardMap")) + : hash_function(hash_function_), shards(num_shard), log(getLogger("ConcurrentShardMap")) { for (unsigned i = 0; i < num_shard; ++i) shards[i].reset(new Shard()); @@ -313,7 +314,7 @@ private: Hash hash_function; std::vector> shards; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.cpp b/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.cpp index 93111fe4df..00cddc717e 100644 --- a/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.cpp +++ b/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.cpp @@ -24,7 +24,7 @@ RuntimeFilterConsumer::RuntimeFilterConsumer( , parallel_id(parallel_id_) , build_params_blocks(local_stream_parallel) , timer{CLOCK_MONOTONIC_COARSE} - , log(&Poco::Logger::get("RuntimeFilterBuild")) + , log(getLogger("RuntimeFilterBuild")) { timer.start(); } @@ -122,9 +122,9 @@ static void OnSendRuntimeFilterCallback( rpc_channel->checkAliveWithController(*cntl); if (cntl->Failed()) - LOG_DEBUG(&Poco::Logger::get("RuntimeFilterBuild"), "Send to coordinator failed, message: " + cntl->ErrorText()); + LOG_DEBUG(getLogger("RuntimeFilterBuild"), "Send to coordinator failed, message: " + cntl->ErrorText()); else - LOG_DEBUG(&Poco::Logger::get("RuntimeFilterBuild"), "Send to coordinator success"); + LOG_DEBUG(getLogger("RuntimeFilterBuild"), "Send to coordinator success"); } void RuntimeFilterConsumer::transferRuntimeFilter(RuntimeFilterData && data) diff --git a/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.h b/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.h index ee219205e7..e83093abcb 100644 --- a/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.h +++ b/src/Interpreters/RuntimeFilter/RuntimeFilterConsumer.h @@ -1,4 +1,5 @@ #pragma once +#include #include 
#include #include @@ -76,6 +77,6 @@ private: std::atomic_bool is_bypassed = false; Stopwatch timer; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/RuntimeFilter/RuntimeFilterManager.cpp b/src/Interpreters/RuntimeFilter/RuntimeFilterManager.cpp index be2be217a3..22e4800f08 100644 --- a/src/Interpreters/RuntimeFilter/RuntimeFilterManager.cpp +++ b/src/Interpreters/RuntimeFilter/RuntimeFilterManager.cpp @@ -106,7 +106,7 @@ size_t RuntimeFilterCollection::add(RuntimeFilterData data, UInt32 parallel_id) else { LOG_WARNING( - &Poco::Logger::get("RuntimeFilterCollection"), + getLogger("RuntimeFilterCollection"), "build rf receive duplicate id:{} will cause rf timeout", parallel_id); } @@ -246,7 +246,7 @@ void RuntimeFilterManager::removeQuery(const String & query_id) void RuntimeFilterManager::addDynamicValue( const String & query_id, RuntimeFilterId filter_id, DynamicData && dynamic_value, UInt32 ref_segment) { - LOG_TRACE(&Poco::Logger::get("RuntimeFilterManager"), "addDynamicValue: {}, {}", filter_id, dynamic_value.dump()); + LOG_TRACE(getLogger("RuntimeFilterManager"), "addDynamicValue: {}, {}", filter_id, dynamic_value.dump()); complete_runtime_filters .compute( makeKey(query_id, filter_id), diff --git a/src/Interpreters/RuntimeFilter/RuntimeFilterManager.h b/src/Interpreters/RuntimeFilter/RuntimeFilterManager.h index 5dc3c5be5b..6913e7c4de 100644 --- a/src/Interpreters/RuntimeFilter/RuntimeFilterManager.h +++ b/src/Interpreters/RuntimeFilter/RuntimeFilterManager.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -140,7 +141,7 @@ public: private: void routineCheck(); - RuntimeFilterManager() : log(&Poco::Logger::get("RuntimeFilterManager")) { initRoutineCheck(); } + RuntimeFilterManager() : log(getLogger("RuntimeFilterManager")) { initRoutineCheck(); } /** * Coordinator: Query Id -> RuntimeFilters @@ -152,7 +153,7 @@ private: */ ConcurrentHashMap complete_runtime_filters; - Poco::Logger * log; + LoggerPtr log; 
std::unique_ptr check_thread{nullptr}; std::atomic_bool need_stop = false; UInt64 clean_rf_time_limit = 300000; /// default 300s to timeout runtime filters diff --git a/src/Interpreters/RuntimeFilter/RuntimeFilterService.cpp b/src/Interpreters/RuntimeFilter/RuntimeFilterService.cpp index 698fc98701..f98ff7049c 100644 --- a/src/Interpreters/RuntimeFilter/RuntimeFilterService.cpp +++ b/src/Interpreters/RuntimeFilter/RuntimeFilterService.cpp @@ -29,7 +29,7 @@ namespace ErrorCodes static void onDispatchRuntimeFilter( - Poco::Logger * log, Protos::DispatchRuntimeFilterResponse * response, brpc::Controller * cntl, std::shared_ptr rpc_channel) + LoggerPtr log, Protos::DispatchRuntimeFilterResponse * response, brpc::Controller * cntl, std::shared_ptr rpc_channel) { std::unique_ptr response_guard(response); std::unique_ptr cntl_guard(cntl); diff --git a/src/Interpreters/RuntimeFilter/RuntimeFilterService.h b/src/Interpreters/RuntimeFilter/RuntimeFilterService.h index 07026e8b01..01df579d1b 100644 --- a/src/Interpreters/RuntimeFilter/RuntimeFilterService.h +++ b/src/Interpreters/RuntimeFilter/RuntimeFilterService.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -26,7 +27,7 @@ namespace DB class RuntimeFilterService : public Protos::RuntimeFilterService { public: - explicit RuntimeFilterService(ContextMutablePtr context_) : context(context_), log(&Poco::Logger::get("RuntimeFilterService")) { } + explicit RuntimeFilterService(ContextMutablePtr context_) : context(context_), log(getLogger("RuntimeFilterService")) { } /// transfer dynamic filer (segment executor host --> coordinator host) void transferRuntimeFilter( @@ -44,7 +45,7 @@ public: private: ContextMutablePtr context; - Poco::Logger * log; + LoggerPtr log; }; REGISTER_SERVICE_IMPL(RuntimeFilterService); diff --git a/src/Interpreters/SQLBinding/SQLBindingCache.cpp b/src/Interpreters/SQLBinding/SQLBindingCache.cpp index 889a2c2f14..aed31c21cc 100644 --- 
a/src/Interpreters/SQLBinding/SQLBindingCache.cpp +++ b/src/Interpreters/SQLBinding/SQLBindingCache.cpp @@ -27,7 +27,7 @@ void BindingCacheManager::initializeGlobalBinding(ContextMutablePtr & context) if (manager_instance->sql_binding_cache && manager_instance->re_binding_cache) { - LOG_WARNING(&Poco::Logger::get("BindingCacheManager"), "Global BindingCacheManager already initialized"); + LOG_WARNING(getLogger("BindingCacheManager"), "Global BindingCacheManager already initialized"); return; } @@ -39,7 +39,7 @@ void BindingCacheManager::initializeSessionBinding() { if (sql_binding_cache && re_binding_cache) { - LOG_WARNING(&Poco::Logger::get("BindingCacheManager"), "Sesion BindingCacheManager already initialized"); + LOG_WARNING(getLogger("BindingCacheManager"), "Session BindingCacheManager already initialized"); return; } diff --git a/src/Interpreters/SQLBinding/SQLBindingUtils.cpp b/src/Interpreters/SQLBinding/SQLBindingUtils.cpp index 08877937bc..edcf74a134 100644 --- a/src/Interpreters/SQLBinding/SQLBindingUtils.cpp +++ b/src/Interpreters/SQLBinding/SQLBindingUtils.cpp @@ -49,7 +49,7 @@ ASTPtr SQLBindingUtils::getASTFromBindings(const char * begin, const char * end, auto sql_binding_ptr = session_sql_cache.get(query_hash); if (sql_binding_ptr && sql_binding_ptr->target_ast) { - LOG_INFO(&Poco::Logger::get("SQL Binding"), "Session SQL Binding Hit"); + LOG_INFO(getLogger("SQL Binding"), "Session SQL Binding Hit"); return sql_binding_ptr->target_ast->clone(); } } @@ -64,7 +64,7 @@ ASTPtr SQLBindingUtils::getASTFromBindings(const char * begin, const char * end, && isMatchBinding(query.data(), query.data() + query.size(), *session_re_binding_ptr)) { InterpreterSetQuery(session_re_binding_ptr->settings->clone(), context).executeForCurrentContext(); - LOG_INFO(&Poco::Logger::get("SQL Binding"), "Regular Expression Binding Hit : {}", + LOG_INFO(getLogger("SQL Binding"), "Regular Expression Binding Hit : {}",
session_re_binding_ptr->pattern); return true; } } @@ -91,7 +91,7 @@ ASTPtr SQLBindingUtils::getASTFromBindings(const char * begin, const char * end, } catch (...) { - tryLogWarningCurrentException(&Poco::Logger::get("SQL Binding"), "Update Global BindingsCache Failed."); + tryLogWarningCurrentException(getLogger("SQL Binding"), "Update Global BindingsCache Failed."); } } @@ -102,7 +102,7 @@ ASTPtr SQLBindingUtils::getASTFromBindings(const char * begin, const char * end, auto global_sql_binding_ptr = global_sql_cache.get(query_hash); if (global_sql_binding_ptr && global_sql_binding_ptr->target_ast) { - LOG_INFO(&Poco::Logger::get("SQL Binding"), "Global SQL Binding Hit"); + LOG_INFO(getLogger("SQL Binding"), "Global SQL Binding Hit"); return global_sql_binding_ptr->target_ast->clone(); } } @@ -221,7 +221,7 @@ bool SQLBindingUtils::isMatchBinding(const char * begin, const char *, SQLBindin } catch (boost::wrapexcept &) { - LOG_ERROR(&Poco::Logger::get("SQL Binding"), "regex_match error"); + LOG_ERROR(getLogger("SQL Binding"), "regex_match error"); return false; } diff --git a/src/Interpreters/SegmentScheduler.h b/src/Interpreters/SegmentScheduler.h index 80817a40e6..0cf9fb1a1f 100644 --- a/src/Interpreters/SegmentScheduler.h +++ b/src/Interpreters/SegmentScheduler.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -75,7 +76,7 @@ struct SegmentSchedulerOptions class SegmentScheduler { public: - SegmentScheduler(): log(&Poco::Logger::get("SegmentScheduler")) {} + SegmentScheduler(): log(getLogger("SegmentScheduler")) {} virtual ~SegmentScheduler() {} PlanSegmentsStatusPtr insertPlanSegments(const String & query_id, PlanSegmentTree * plan_segments_ptr, @@ -135,7 +136,7 @@ private: bthread::Mutex bsp_scheduler_map_mutex; BspSchedulerMap bsp_scheduler_map; - Poco::Logger * log; + LoggerPtr log; void buildDAGGraph(PlanSegmentTree * plan_segments_ptr, std::shared_ptr graph); PlanSegmentExecutionInfo scheduleV2(const String & query_id, ContextPtr 
query_context, std::shared_ptr dag_graph_ptr); diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index 271f2677d9..5da051100f 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -53,7 +54,7 @@ public: /// store all set elements in explicit form. /// This is needed for subsequent use for index. Set(const SizeLimits & limits_, bool fill_set_elements_, bool transform_null_in_) - : log(&Poco::Logger::get("Set")), + : log(getLogger("Set")), limits(limits_), fill_set_elements(fill_set_elements_), transform_null_in(transform_null_in_) { } @@ -127,7 +128,7 @@ private: /// Types for set_elements. DataTypes set_elements_types; - Poco::Logger * log; + LoggerPtr log; /// Limitations on the maximum size of the set SizeLimits limits; diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 861dd58742..6ac2149954 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -79,7 +79,7 @@ std::shared_ptr createSystemLog( if (database != default_database_name) { /// System tables must be loaded before other tables, but loading order is undefined for all databases except `system` - LOG_ERROR(&Poco::Logger::get("SystemLog"), "Custom database name for a system table specified in config." + LOG_ERROR(getLogger("SystemLog"), "Custom database name for a system table specified in config." 
" Table `{}` will be created in `system` database instead of `{}`", table, database); database = default_database_name; } diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index cc3836200e..ab1b0bb723 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -228,7 +229,9 @@ public: ASTPtr getCreateTableQuery(ParserSettingsImpl dt) override; protected: - Poco::Logger * log; + /// use raw logger here because TextLog requires logger->setLevel() capability + /// which is not supported by VirtualLogger + LoggerRawPtr log; /* Saving thread data */ const StorageID table_id; @@ -286,7 +289,7 @@ SystemLog::SystemLog( , flush_interval_milliseconds(flush_interval_milliseconds_) { assert((database_name_ == DatabaseCatalog::SYSTEM_DATABASE) || (database_name_ == CNCH_SYSTEM_LOG_DB_NAME)); - log = &Poco::Logger::get("SystemLog (" + database_name_ + "." + table_name_ + ")"); + log = getRawLogger("SystemLog (" + database_name_ + "." 
+ table_name_ + ")"); } diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index f9dadb2180..57db9001f2 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -129,7 +129,7 @@ void TableJoin::addInequalConditions(const ASTs & inequal_conditions, const Name inequal_column_name = mixed_inequal_condition->getColumnName(); auto syntax_result = TreeRewriter(context).analyze(mixed_inequal_condition, columns_for_join); inequal_condition_actions = ExpressionAnalyzer(mixed_inequal_condition, syntax_result, context).getActions(false); - LOG_DEBUG(&Poco::Logger::get("TableJoin"), fmt::format("addInequalConditions: mixed_inequal_condition: {}", + LOG_DEBUG(getLogger("TableJoin"), fmt::format("addInequalConditions: mixed_inequal_condition: {}", queryToString(mixed_inequal_condition))); } @@ -517,7 +517,7 @@ bool TableJoin::inferJoinKeyCommonType(const NamesAndTypesList & left, const Nam return fmt::format("{}", fmt::join(text, ", ")); }; LOG_TRACE( - &Poco::Logger::get("TableJoin"), + getLogger("TableJoin"), "Infer supertype for joined columns. 
Left: [{}], Right: [{}]", format_type_map(left_type_map), format_type_map(right_type_map)); diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index f698af49bd..62444fc80f 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -108,7 +108,7 @@ struct TemporaryFileStream::OutputWriter , out_compressed_buf(*out_buf) , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_) { - LOG_TRACE(&Poco::Logger::get("TemporaryFileStream"), "Writing to temporary file {}", path); + LOG_TRACE(getLogger("TemporaryFileStream"), "Writing to temporary file {}", path); } OutputWriter(std::unique_ptr out_buf_, const Block & header_) @@ -116,7 +116,7 @@ struct TemporaryFileStream::OutputWriter , out_compressed_buf(*out_buf) , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_) { - LOG_TRACE(&Poco::Logger::get("TemporaryFileStream"), + LOG_TRACE(getLogger("TemporaryFileStream"), "Writing to temporary file {}", static_cast(out_buf.get())->getFileName()); } @@ -182,7 +182,7 @@ struct TemporaryFileStream::InputReader , in_compressed_buf(in_file_buf) , in_reader(in_compressed_buf, header_, DBMS_TCP_PROTOCOL_VERSION) { - LOG_TRACE(&Poco::Logger::get("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path); + LOG_TRACE(getLogger("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path); } explicit InputReader(const String & path) @@ -190,7 +190,7 @@ struct TemporaryFileStream::InputReader , in_compressed_buf(in_file_buf) , in_reader(in_compressed_buf, DBMS_TCP_PROTOCOL_VERSION) { - LOG_TRACE(&Poco::Logger::get("TemporaryFileStream"), "Reading from {}", path); + LOG_TRACE(getLogger("TemporaryFileStream"), "Reading from {}", path); } Block read() diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 4aa47e1eb7..864669a4c4 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ 
b/src/Interpreters/TreeRewriter.cpp @@ -1304,7 +1304,7 @@ void TreeRewriterResult::rewriteUnknownLeftJoinIdentifier(ASTPtr & query, NameSe ss << "Try rewrite identifier: "; for(const auto & identifier: need_rewrite_identifiers) ss << "'" << identifier << "' "; - LOG_DEBUG(&Poco::Logger::get("ExpressionAnalyzer"), ss.str()); + LOG_DEBUG(getLogger("ExpressionAnalyzer"), ss.str()); TablesWithColumns tables {*it}; TranslateQualifiedNamesVisitor::Data visitor_data(available_columns, tables, true, need_rewrite_identifiers, true, check_identifier_begin_valid); diff --git a/src/Interpreters/VirtualWarehouseHandle.cpp b/src/Interpreters/VirtualWarehouseHandle.cpp index e71f09605e..4a96a5cf4c 100644 --- a/src/Interpreters/VirtualWarehouseHandle.cpp +++ b/src/Interpreters/VirtualWarehouseHandle.cpp @@ -56,7 +56,7 @@ VirtualWarehouseHandleImpl::VirtualWarehouseHandleImpl( , name(std::move(name_)) , uuid(uuid_) , settings(settings_) - , log(&Poco::Logger::get(name + " (VirtualWarehouseHandle)")) + , log(getLogger(name + " (VirtualWarehouseHandle)")) { tryUpdateWorkerGroups(ForceUpdate); } diff --git a/src/Interpreters/VirtualWarehouseHandle.h b/src/Interpreters/VirtualWarehouseHandle.h index 499bdd06ba..0ab929f2f9 100644 --- a/src/Interpreters/VirtualWarehouseHandle.h +++ b/src/Interpreters/VirtualWarehouseHandle.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -176,7 +177,7 @@ private: const String name; const UUID uuid; VirtualWarehouseSettings settings; - Poco::Logger * log; + LoggerPtr log; /// In ByteHouse, a VW will be auto recycled (auto-suspend) if no new queries received for a period (5 minutes by default). /// And when user send queries to the VW again, ByteYard will make sure to send out the queries after workers are full ready. 
diff --git a/src/Interpreters/VirtualWarehousePool.cpp b/src/Interpreters/VirtualWarehousePool.cpp index d6bba8324e..388f6092cc 100644 --- a/src/Interpreters/VirtualWarehousePool.cpp +++ b/src/Interpreters/VirtualWarehousePool.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes } VirtualWarehousePool::VirtualWarehousePool(ContextPtr global_context_) - : WithContext(global_context_), log(&Poco::Logger::get("VirtualWarehousePool")) + : WithContext(global_context_), log(getLogger("VirtualWarehousePool")) { } diff --git a/src/Interpreters/VirtualWarehousePool.h b/src/Interpreters/VirtualWarehousePool.h index 67ca761b09..04d6cbbabc 100644 --- a/src/Interpreters/VirtualWarehousePool.h +++ b/src/Interpreters/VirtualWarehousePool.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -51,7 +52,7 @@ private: void removeOrReplaceOutdatedVW(); - Poco::Logger * log {}; + LoggerPtr log {}; std::atomic last_update_time_ns{0}; }; diff --git a/src/Interpreters/VirtualWarehouseQueue.cpp b/src/Interpreters/VirtualWarehouseQueue.cpp index bcd0d52f88..5541e2b183 100644 --- a/src/Interpreters/VirtualWarehouseQueue.cpp +++ b/src/Interpreters/VirtualWarehouseQueue.cpp @@ -146,13 +146,13 @@ void enqueueVirtualWarehouseQueue(ContextMutablePtr context, ASTPtr & query_ast) } catch (...) 
{ - LOG_DEBUG(&Poco::Logger::get("VirtualWarehouseQueue"), "only queue dml query"); + LOG_DEBUG(getLogger("VirtualWarehouseQueue"), "only queue dml query"); return; } if (ast_type != ASTType::ASTSelectQuery && ast_type != ASTType::ASTSelectWithUnionQuery && ast_type != ASTType::ASTInsertQuery && ast_type != ASTType::ASTDeleteQuery && ast_type != ASTType::ASTUpdateQuery) { - LOG_DEBUG(&Poco::Logger::get("VirtualWarehouseQueue"), "only queue dml query"); + LOG_DEBUG(getLogger("VirtualWarehouseQueue"), "only queue dml query"); return; } auto vw_handle = context->tryGetCurrentVW(); @@ -172,7 +172,7 @@ void enqueueVirtualWarehouseQueue(ContextMutablePtr context, ASTPtr & query_ast) if (query_ast) { auto fingerprint = SQLFingerprint().generateMD5(query_ast); - LOG_TRACE(&Poco::Logger::get("VirtualWarehouseQueueManager"), "sql : fingerprint is {}", fingerprint); + LOG_TRACE(getLogger("VirtualWarehouseQueueManager"), "sql : fingerprint is {}", fingerprint); vw_queue_info->query_rule.fingerprint = fingerprint; } auto queue_result = vw_handle->enqueue(vw_queue_info, context->getSettingsRef().vw_query_queue_timeout_ms); @@ -181,12 +181,12 @@ void enqueueVirtualWarehouseQueue(ContextMutablePtr context, ASTPtr & query_ast) if (queue_result == VWQueueResultStatus::QueueSuccess) { LOG_DEBUG( - &Poco::Logger::get("VirtualWarehouseQueueManager"), "query queue run time : {} ms", queue_watch.elapsedMilliseconds()); + getLogger("VirtualWarehouseQueueManager"), "query queue run time : {} ms", queue_watch.elapsedMilliseconds()); } else { LOG_ERROR( - &Poco::Logger::get("VirtualWarehouseQueueManager"), "query queue result : {}", VWQueueResultStatusToString(queue_result)); + getLogger("VirtualWarehouseQueueManager"), "query queue result : {}", VWQueueResultStatusToString(queue_result)); throw Exception( ErrorCodes::CNCH_QUEUE_QUERY_FAILURE, "query queue failed for query_id {}: {}", diff --git a/src/Interpreters/VirtualWarehouseQueue.h b/src/Interpreters/VirtualWarehouseQueue.h index 
cf10e54800..5f165bcd4e 100644 --- a/src/Interpreters/VirtualWarehouseQueue.h +++ b/src/Interpreters/VirtualWarehouseQueue.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -79,7 +80,7 @@ using QueueRuleWithRegexVec = std::vector; class VirtualWarehouseQueue { public: - VirtualWarehouseQueue() : log(&Poco::Logger::get("VirtualWarehouseQueue")) { } + VirtualWarehouseQueue() : log(getLogger("VirtualWarehouseQueue")) { } ~VirtualWarehouseQueue() { shutdown(); } void init(const std::string & queue_name_); void shutdown(); @@ -125,7 +126,7 @@ private: size_t max_concurrency{100}; size_t current_parallelize_size{0}; VWQueryQueue vw_query_queue; - Poco::Logger * log; + LoggerPtr log; //rules mutable std::shared_mutex rule_mutex; @@ -139,7 +140,7 @@ class VirtualWarehouseQueueManager public: void init(); - VirtualWarehouseQueueManager() : log(&Poco::Logger::get("VirtualWarehouseQueueManager")) { init(); } + VirtualWarehouseQueueManager() : log(getLogger("VirtualWarehouseQueueManager")) { init(); } ~VirtualWarehouseQueueManager() { shutdown(); } void shutdown(); void updateQueue(const std::vector & queue_datas); @@ -157,7 +158,7 @@ public: private: std::array(QueueName::Count)> query_queues; std::atomic is_stop{false}; - Poco::Logger * log; + LoggerPtr log; }; } // namespace DB diff --git a/src/Interpreters/WorkerGroupHandle.cpp b/src/Interpreters/WorkerGroupHandle.cpp index de80551d24..339693f8e8 100644 --- a/src/Interpreters/WorkerGroupHandle.cpp +++ b/src/Interpreters/WorkerGroupHandle.cpp @@ -111,7 +111,7 @@ WorkerGroupHandleImpl::WorkerGroupHandleImpl( if (address.is_local) info.local_addresses.push_back(address); - LOG_DEBUG(&Poco::Logger::get("WorkerGroupHandleImpl"), "Add address {}. is_local: {} id: {}", host.toDebugString(), address.is_local, host.id); + LOG_DEBUG(getLogger("WorkerGroupHandleImpl"), "Add address {}. 
is_local: {} id: {}", host.toDebugString(), address.is_local, host.id); ConnectionPoolPtr pool = std::make_shared( settings.distributed_connections_pool_size, @@ -129,7 +129,7 @@ WorkerGroupHandleImpl::WorkerGroupHandleImpl( } buildRing(); - LOG_DEBUG(&Poco::Logger::get("WorkerGroupHandleImpl"), "Success built ring with {} nodes\n", ring->size()); + LOG_DEBUG(getLogger("WorkerGroupHandleImpl"), "Success built ring with {} nodes\n", ring->size()); } WorkerGroupHandleImpl::WorkerGroupHandleImpl(const WorkerGroupData & data, const ContextPtr & context_) diff --git a/src/Interpreters/WorkerStatusManager.cpp b/src/Interpreters/WorkerStatusManager.cpp index bdec287193..a8b74b8f72 100644 --- a/src/Interpreters/WorkerStatusManager.cpp +++ b/src/Interpreters/WorkerStatusManager.cpp @@ -29,7 +29,7 @@ WorkerGroupStatus::~WorkerGroupStatus() for (const auto & half_open_id : half_open_workers) { global_context->getWorkerStatusManager()->restoreWorkerNode(half_open_id); - LOG_DEBUG(&Poco::Logger::get("WorkerStatusManager"), "restore half open worker {}", half_open_id.ToString()); + LOG_DEBUG(getLogger("WorkerStatusManager"), "restore half open worker {}", half_open_id.ToString()); } } } @@ -62,7 +62,7 @@ void WorkerGroupStatus::calculateStatus() status = WorkerGroupHealthStatus::Critical; LOG_DEBUG( - &Poco::Logger::get("WorkerStatusManager"), + getLogger("WorkerStatusManager"), "allWorkerSize: {} healthWorkerSize: {} unhealthWorkerSize: {} \ HeavyLoadSize: {} onlySourceSize: {} unknowWorkerSize: {} notConnectedWorkerSize: {} halfOpenChecking: {} halfOpen: {}", total_worker_size, @@ -84,7 +84,7 @@ std::optional> WorkerGroupStatus::selectHealthNode(const Hos } WorkerStatusManager::WorkerStatusManager(ContextWeakMutablePtr context_) - : WithContext(context_), log(&Poco::Logger::get("WorkerStatusManager")) + : WithContext(context_), log(getLogger("WorkerStatusManager")) { schedule_pool.emplace(1, CurrentMetrics::BackgroundRMHeartbeatSchedulePoolTask, "RMHeart"); 
startHeartbeat(*schedule_pool); diff --git a/src/Interpreters/WorkerStatusManager.h b/src/Interpreters/WorkerStatusManager.h index 2175daf2fe..f9eb221eeb 100644 --- a/src/Interpreters/WorkerStatusManager.h +++ b/src/Interpreters/WorkerStatusManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -371,7 +372,7 @@ private: AdaptiveSchedulerConfig adaptive_scheduler_config; mutable bthread::Mutex map_mutex; - Poco::Logger * log; + LoggerPtr log; // rm heartbeat mutable std::optional schedule_pool; std::atomic heartbeat_interval{10000}; /// in ms; diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index cacdf85a31..9381c92b0a 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -212,7 +212,7 @@ DDLQueryStatusInputStream::DDLQueryStatusInputStream(const String & zk_node_path : node_path(zk_node_path) , context(context_) , watch(CLOCK_MONOTONIC_COARSE) - , log(&Poco::Logger::get("DDLQueryStatusInputStream")) + , log(getLogger("DDLQueryStatusInputStream")) { if (context->getSettingsRef().distributed_ddl_output_mode == DistributedDDLOutputMode::THROW || context->getSettingsRef().distributed_ddl_output_mode == DistributedDDLOutputMode::NONE) diff --git a/src/Interpreters/executeDDLQueryOnCluster.h b/src/Interpreters/executeDDLQueryOnCluster.h index bbd39a6e8e..3f86ede485 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.h +++ b/src/Interpreters/executeDDLQueryOnCluster.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -51,7 +52,7 @@ private: String node_path; ContextPtr context; Stopwatch watch; - Poco::Logger * log; + LoggerPtr log; Block sample; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index f074ad26f4..e211fad365 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -226,13 +226,13 @@ void 
trySetVirtualWarehouseWithBackup(ContextMutablePtr & context, const ASTPtr auto idx = round_robin_count++ % (1 + backup_vws.size()); if (idx == 0) { - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "use original vw to execute query"); + LOG_DEBUG(getLogger("executeQuery"), "use original vw to execute query"); trySetVirtualWarehouseAndWorkerGroup(ast, context); } else { ProfileEvents::increment(ProfileEvents::BackupVW, 1); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "backup round_robin choose {}", backup_vws[idx - 1]); + LOG_DEBUG(getLogger("executeQuery"), "backup round_robin choose {}", backup_vws[idx - 1]); use_backup_vw = true; trySetVirtualWarehouseAndWorkerGroup(backup_vws[idx - 1], context); } @@ -249,14 +249,14 @@ void trySetVirtualWarehouseWithBackup(ContextMutablePtr & context, const ASTPtr trySetVirtualWarehouseAndWorkerGroup(vw, context); ProfileEvents::increment(ProfileEvents::BackupVW, 1); use_backup_vw = true; - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "backup vw choose {}", vw); + LOG_DEBUG(getLogger("executeQuery"), "backup vw choose {}", vw); break; } catch (const Exception &) { if (idx == backup_vws.size() - 1) { - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "none of backup vws are available"); + LOG_DEBUG(getLogger("executeQuery"), "none of backup vws are available"); throw; } } @@ -288,14 +288,14 @@ void tryQueueQuery(ContextMutablePtr context, ASTPtr & query_ast) try { ast_type = query_ast->getType(); } catch (...) 
{ - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "only queue dml query"); + LOG_DEBUG(getLogger("executeQuery"), "only queue dml query"); return; } auto worker_group_handler = context->tryGetCurrentWorkerGroup(); if (ast_type != ASTType::ASTSelectQuery && ast_type != ASTType::ASTSelectWithUnionQuery && ast_type != ASTType::ASTInsertQuery && ast_type != ASTType::ASTDeleteQuery && ast_type != ASTType::ASTUpdateQuery) { - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "only queue dml query"); + LOG_DEBUG(getLogger("executeQuery"), "only queue dml query"); return; } if (worker_group_handler) @@ -316,11 +316,11 @@ void tryQueueQuery(ContextMutablePtr context, ASTPtr & query_ast) { context->setCurrentWorkerGroup(current_vw->getWorkerGroup(wg_name)); } - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "query queue run time : {} ms", queue_watch.elapsedMilliseconds()); + LOG_DEBUG(getLogger("executeQuery"), "query queue run time : {} ms", queue_watch.elapsedMilliseconds()); } else { - LOG_ERROR(&Poco::Logger::get("executeQuery"), "query queue result : {}", queueResultStatusToString(queue_result)); + LOG_ERROR(getLogger("executeQuery"), "query queue result : {}", queueResultStatusToString(queue_result)); throw Exception( ErrorCodes::CNCH_QUEUE_QUERY_FAILURE, "query queue failed for query_id {}: {}", @@ -422,7 +422,7 @@ static void logQuery(const String & query, ContextPtr context, bool internal) { if (internal) { - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(internal) {}", joinLines(query)); + LOG_DEBUG(getLogger("executeQuery"), "(internal) {}", joinLines(query)); } else { @@ -442,7 +442,7 @@ static void logQuery(const String & query, ContextPtr context, bool internal) comment = fmt::format(" (comment: {})", comment); LOG_DEBUG( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "(from {}{}{}){} {}", client_info.current_address.toString(), (current_user != "default" ? 
", user: " + current_user : ""), @@ -453,7 +453,7 @@ static void logQuery(const String & query, ContextPtr context, bool internal) if (client_info.client_trace_context.trace_id != UUID()) { LOG_TRACE( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "OpenTelemetry traceparent '{}'", client_info.client_trace_context.composeTraceparentHeader()); } @@ -478,7 +478,7 @@ static void logException(ContextPtr context, QueryLogElement & elem) if (elem.stack_trace.empty()) LOG_ERROR( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "{} (from {}){} (in query: {})", elem.exception, context->getClientInfo().current_address.toString(), @@ -486,7 +486,7 @@ static void logException(ContextPtr context, QueryLogElement & elem) joinLines(elem.query)); else LOG_ERROR( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "{} (from {}){} (in query: {})" ", Stack trace (when copying this message, always include the lines below):\n\n{}", elem.exception, @@ -685,7 +685,7 @@ static TransactionCnchPtr prepareCnchTransaction(ContextMutablePtr context, [[ma return {}; if (auto txn = context->getCurrentTransaction(); txn) { - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "Cnch query is already in a transaction " + txn->getTransactionRecord().toString()); + LOG_DEBUG(getLogger("executeQuery"), "Cnch query is already in a transaction " + txn->getTransactionRecord().toString()); return txn; } @@ -884,7 +884,7 @@ static std::tuple executeQueryImpl( } catch (...) 
{ - tryLogWarningCurrentException(&Poco::Logger::get("SQL Binding"), "SQL binding match error."); + tryLogWarningCurrentException(getLogger("SQL Binding"), "SQL binding match error."); } } } @@ -897,7 +897,7 @@ static std::tuple executeQueryImpl( if (in_interactive_txn && isDDLQuery(context, ast)) { /// Commit the current explicit transaction - LOG_WARNING(&Poco::Logger::get("executeQuery"), "Receive DDL in interactive transaction session, will commit the session implicitly"); + LOG_WARNING(getLogger("executeQuery"), "Receive DDL in interactive transaction session, will commit the session implicitly"); InterpreterCommitQuery(nullptr, context).execute(); } @@ -906,7 +906,7 @@ static std::tuple executeQueryImpl( { auto host_ports = getTargetServer(context, ast); LOG_DEBUG( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "target server is {} and local server is {}", host_ports.toDebugString(), context->getHostWithPorts().toDebugString()); @@ -915,11 +915,11 @@ static std::tuple executeQueryImpl( size_t query_size = (max_query_size == 0) ? 
(end - begin) : std::min(end - begin, static_cast(max_query_size)); String query = String(begin, begin + query_size); LOG_DEBUG( - &Poco::Logger::get("executeQuery"), "Will reroute query {} to {}", query, host_ports.toDebugString()); + getLogger("executeQuery"), "Will reroute query {} to {}", query, host_ports.toDebugString()); context->initializeExternalTablesIfSet(); context->setSetting("enable_auto_query_forwarding", Field(0)); executeQueryByProxy(context, host_ports, ast, res, in_interactive_txn, query); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "Query forwarded to remote server done"); + LOG_DEBUG(getLogger("executeQuery"), "Query forwarded to remote server done"); return std::make_tuple(ast, std::move(res)); } } @@ -955,7 +955,7 @@ static std::tuple executeQueryImpl( changes.insertSetting("max_bytes_before_external_sort", Field(104857600)); changes.insertSetting("exchange_queue_bytes", Field(1073741824)); // !!!! TODO @luocongkai support it for(auto &change: changes) { - LOG_WARNING(&Poco::Logger::get("executeQuery"), "SpillMode is AUTO, this setting will be overwriten, {}: {}->{}", change.name, context->getSettings().get(change.name).toString(), change.value.toString()); + LOG_WARNING(getLogger("executeQuery"), "SpillMode is AUTO, this setting will be overwriten, {}: {}->{}", change.name, context->getSettings().get(change.name).toString(), change.value.toString()); context->setSetting(change.name, change.value); } @@ -1026,7 +1026,7 @@ static std::tuple executeQueryImpl( trySetVirtualWarehouseWithBackup(context, ast, use_backup_vw); if (const auto wg = context->tryGetCurrentWorkerGroup()) { - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "pick worker group {}", wg->getQualifiedName()); + LOG_DEBUG(getLogger("executeQuery"), "pick worker group {}", wg->getQualifiedName()); } if (context->getServerType() == ServerType::cnch_server) { @@ -1089,7 +1089,7 @@ static std::tuple executeQueryImpl( ProcessList::EntryPtr process_list_entry; if (!internal && 
!ast->as()) { - LOG_TRACE(&Poco::Logger::get("executeQuery"), "enqueue process list query :{}", query_for_logging); + LOG_TRACE(getLogger("executeQuery"), "enqueue process list query :{}", query_for_logging); /// processlist also has query masked now, to avoid secrets leaks though SHOW PROCESSLIST by other users. process_list_entry = context->getProcessList().insert(query_for_logging, ast.get(), context); QueryStatus & process_list_elem = process_list_entry->get(); @@ -1213,7 +1213,7 @@ static std::tuple executeQueryImpl( if (context->getSettingsRef().enable_optimizer_fallback && !no_fallback_error_codes.contains(getCurrentExceptionCode())) { tryLogWarningCurrentException( - &Poco::Logger::get("executeQuery"), "Query failed in optimizer enabled, try to fallback to simple query."); + getLogger("executeQuery"), "Query failed in optimizer enabled, try to fallback to simple query."); turnOffOptimizer(context, ast); if (auto session_resource = context->tryGetCnchServerResource()) @@ -1226,7 +1226,7 @@ static std::tuple executeQueryImpl( } else { - LOG_INFO(&Poco::Logger::get("executeQuery"), "Query failed in optimizer enabled, throw exception."); + LOG_INFO(getLogger("executeQuery"), "Query failed in optimizer enabled, throw exception."); throw; } } @@ -1234,7 +1234,7 @@ static std::tuple executeQueryImpl( !context->getSettingsRef().enable_optimizer && context->getSettingsRef().distributed_perfect_shard && context->getSettingsRef().fallback_perfect_shard) { - LOG_INFO(&Poco::Logger::get("executeQuery"), "Query failed in perfect-shard enabled, try to fallback to normal mode."); + LOG_INFO(getLogger("executeQuery"), "Query failed in perfect-shard enabled, try to fallback to normal mode."); InterpreterPerfectShard::turnOffPerfectShard(context, ast); auto retry_interpreter = InterpreterFactory::get(ast, context, stage); res = retry_interpreter->execute(); @@ -1250,19 +1250,19 @@ static std::tuple executeQueryImpl( && (!query_cache_context.query_executed_by_optimizer)) { 
const std::set storage_ids = res.pipeline.getUsedStorageIDs(); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), + LOG_DEBUG(getLogger("executeQuery"), "pipeline has all used StorageIDs: {}", res.pipeline.hasAllUsedStorageIDs()); if (res.pipeline.hasAllUsedStorageIDs() && (!storage_ids.empty())) { - logUsedStorageIDs(&Poco::Logger::get("executeQuery"), storage_ids); + logUsedStorageIDs(getLogger("executeQuery"), storage_ids); TxnTimestamp & source_update_time_for_query_cache = query_cache_context.source_update_time_for_query_cache; if (settings.enable_transactional_query_cache) source_update_time_for_query_cache = getMaxUpdateTime(storage_ids, context); else source_update_time_for_query_cache = TxnTimestamp::minTS(); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "max update timestamp {}", source_update_time_for_query_cache); + LOG_DEBUG(getLogger("executeQuery"), "max update timestamp {}", source_update_time_for_query_cache); if ((settings.enable_transactional_query_cache == false) || (source_update_time_for_query_cache.toUInt64() != 0)) { @@ -1389,7 +1389,7 @@ static std::tuple executeQueryImpl( } else { - LOG_INFO(&Poco::Logger::get("executeQuery"), "not write to cache"); + LOG_INFO(getLogger("executeQuery"), "not write to cache"); } } @@ -1634,7 +1634,7 @@ static std::tuple executeQueryImpl( if (elem.read_rows != 0) { LOG_INFO( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "Read {} rows, {} in {} sec., {} rows/sec., {}/sec.", elem.read_rows, ReadableSize(elem.read_bytes), @@ -1834,7 +1834,7 @@ static std::tuple executeQueryImpl( { WriteBufferFromOwnString msg_buf; res.in->dumpTree(msg_buf); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "Query pipeline:\n{}", msg_buf.str()); + LOG_DEBUG(getLogger("executeQuery"), "Query pipeline:\n{}", msg_buf.str()); } } } @@ -2290,7 +2290,7 @@ void updateAsyncQueryStatus( if (!context->getCnchCatalog()->tryGetAsyncQueryStatus(async_query_id, async_query_status)) { LOG_WARNING( - 
&Poco::Logger::get("executeQuery"), "async query status not found, insert new one with async_query_id: {}", async_query_id); + getLogger("executeQuery"), "async query status not found, insert new one with async_query_id: {}", async_query_id); async_query_status.set_id(async_query_id); async_query_status.set_query_id(query_id); } diff --git a/src/Interpreters/executeQueryHelper.cpp b/src/Interpreters/executeQueryHelper.cpp index 8e8afae146..1ee43ec9bd 100644 --- a/src/Interpreters/executeQueryHelper.cpp +++ b/src/Interpreters/executeQueryHelper.cpp @@ -62,7 +62,7 @@ HostWithPorts getTargetServer(ContextPtr context, ASTPtr & ast) // simplily use the first table if there are multiple tables used DatabaseAndTableWithAlias db_and_table(tables[0]); LOG_DEBUG( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "Extract db and table {}.{} from the query.", db_and_table.database, db_and_table.table); @@ -134,7 +134,7 @@ void executeQueryByProxy(ContextMutablePtr context, const HostWithPorts & server res.remote_execution_conn->setDefaultDatabase(context->getCurrentDatabase()); // PipelineExecutor requires block header. 
- LOG_DEBUG(&Poco::Logger::get("executeQuery"), "Sending query as ordinary query"); + LOG_DEBUG(getLogger("executeQuery"), "Sending query as ordinary query"); Block header; if (context->getSettingsRef().enable_select_query_forwarding && ast->as()) { @@ -166,14 +166,14 @@ void executeQueryByProxy(ContextMutablePtr context, const HostWithPorts & server if (proxy_txn) proxy_txn->setTransactionStatus(CnchTransactionStatus::Finished); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "Query success on remote server"); + LOG_DEBUG(getLogger("executeQuery"), "Query success on remote server"); }; res.exception_callback = [proxy_txn, context]() { if (proxy_txn) proxy_txn->setTransactionStatus(CnchTransactionStatus::Aborted); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "Query failed on remote server"); + LOG_DEBUG(getLogger("executeQuery"), "Query failed on remote server"); }; } diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp index cfded4748f..811c6c3f16 100644 --- a/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -125,7 +125,7 @@ static void loadDatabase( void loadMetadata(ContextMutablePtr context, const String & default_database_name) { - Poco::Logger * log = &Poco::Logger::get("loadMetadata"); + LoggerPtr log = getLogger("loadMetadata"); String path = context->getPath() + "metadata"; @@ -238,7 +238,7 @@ void loadMetadataSystem(ContextMutablePtr context) } /* Load schema files from hdfs*/ -void reloadFormatSchema(ContextMutablePtr context, String remote_format_schema_path, String format_schema_path, Poco::Logger * log) +void reloadFormatSchema(ContextMutablePtr context, String remote_format_schema_path, String format_schema_path, LoggerPtr log) { #if USE_HDFS if (!remote_format_schema_path.empty()) diff --git a/src/Interpreters/loadMetadata.h b/src/Interpreters/loadMetadata.h index 19400ac41d..98675dd6df 100644 --- a/src/Interpreters/loadMetadata.h +++ b/src/Interpreters/loadMetadata.h @@ -21,6 +21,7 @@ 
#pragma once +#include #include @@ -37,6 +38,6 @@ void loadMetadata(ContextMutablePtr context, const String & default_database_nam void reloadFormatSchema(ContextMutablePtr context, String remote_format_schema_path, String format_schema_path, - Poco::Logger * log = nullptr); + LoggerPtr log = nullptr); } diff --git a/src/Interpreters/profile/ProfileLogHub.h b/src/Interpreters/profile/ProfileLogHub.h index 25f6e89214..829a968d71 100644 --- a/src/Interpreters/profile/ProfileLogHub.h +++ b/src/Interpreters/profile/ProfileLogHub.h @@ -1,3 +1,4 @@ +#include #include #include #include @@ -34,7 +35,7 @@ public: return profile_log_hub; } - explicit ProfileLogHub() : consume_thread_pool(std::make_unique(10)), logger(&Poco::Logger::get("ProfileLogHub")) { } + explicit ProfileLogHub() : consume_thread_pool(std::make_unique(10)), logger(getLogger("ProfileLogHub")) { } ~ProfileLogHub() = default; @@ -81,7 +82,7 @@ private: Consumers profile_element_consumers; std::unique_ptr consume_thread_pool; bthread::Mutex mutex; - Poco::Logger * logger; + LoggerPtr logger; }; template diff --git a/src/Interpreters/sendPlanSegment.cpp b/src/Interpreters/sendPlanSegment.cpp index 23e6a31879..925049f3a7 100644 --- a/src/Interpreters/sendPlanSegment.cpp +++ b/src/Interpreters/sendPlanSegment.cpp @@ -31,7 +31,7 @@ void sendPlanSegmentToAddress( std::shared_ptr plan_segment_buf_ptr, const WorkerId & worker_id) { - static auto * log = &Poco::Logger::get("SegmentScheduler::sendPlanSegment"); + static auto log = getLogger("SegmentScheduler::sendPlanSegment"); LOG_TRACE( log, "query id {} segment id {}, parallel index {}, address {}, addtional filters {}, plansegment {}", diff --git a/src/Interpreters/tests/gtest_exchange_source_step.cpp b/src/Interpreters/tests/gtest_exchange_source_step.cpp index 27ecd0efe6..82b34cf2f7 100644 --- a/src/Interpreters/tests/gtest_exchange_source_step.cpp +++ b/src/Interpreters/tests/gtest_exchange_source_step.cpp @@ -101,7 +101,7 @@ TEST(ExchangeSourceStep, 
InitializePipelineTest) QueryPipeline pipeline; exchange_source_step.initializePipeline(pipeline, BuildQueryPipelineSettings::fromContext(context)); - PlanSegmentExecutor::registerAllExchangeReceivers(&Poco::Logger::get("PlanSegmentExecutor"), pipeline, 200); + PlanSegmentExecutor::registerAllExchangeReceivers(getLogger("PlanSegmentExecutor"), pipeline, 200); Chunk chunk = createUInt8Chunk(10, 1, 8); auto total_bytes = chunk.bytes(); diff --git a/src/Interpreters/trySetVirtualWarehouse.cpp b/src/Interpreters/trySetVirtualWarehouse.cpp index d78b6aae48..1f998dd0bd 100644 --- a/src/Interpreters/trySetVirtualWarehouse.cpp +++ b/src/Interpreters/trySetVirtualWarehouse.cpp @@ -68,7 +68,7 @@ static bool trySetVirtualWarehouseFromStorageID( : cnch_table->getSettings()->cnch_vw_default; LOG_DEBUG( - &Poco::Logger::get("trySetVirtualWarehouse"), + getLogger("trySetVirtualWarehouse"), "set vw to {} from cnch table {}, type is WRITE {}", vw_name, table_id.getNameForLogs(), @@ -109,7 +109,7 @@ static bool trySetVirtualWarehouseFromStorageID( : nested_table->getSettings()->cnch_vw_default; LOG_DEBUG( - &Poco::Logger::get("trySetVirtualWarehouse"), + getLogger("trySetVirtualWarehouse"), "set vw to {} from nested cnch table {}, type is WRITE {}", nested_vw_name, nested_table->getStorageID().getNameForLogs(), @@ -530,7 +530,7 @@ bool trySetVirtualWarehouseAndWorkerGroup(const std::string & vw_name, ContextMu auto value = context->getSettingsRef().vw_schedule_algo.value; auto algo = ResourceManagement::toVWScheduleAlgo(&value[0]); auto worker_group = context->getCurrentVW()->pickWorkerGroup(algo); - LOG_DEBUG(&Poco::Logger::get("VirtualWarehouse"), "Picked worker group {}", worker_group->getQualifiedName()); + LOG_DEBUG(getLogger("VirtualWarehouse"), "Picked worker group {}", worker_group->getQualifiedName()); context->setCurrentWorkerGroup(std::move(worker_group)); return true; } diff --git a/src/MergeTreeCommon/CnchServerManager.h b/src/MergeTreeCommon/CnchServerManager.h index 
72e992b6bc..96b107ca9e 100644 --- a/src/MergeTreeCommon/CnchServerManager.h +++ b/src/MergeTreeCommon/CnchServerManager.h @@ -16,6 +16,7 @@ #pragma once #include +#include #include #include #include @@ -62,7 +63,7 @@ private: /// set topology status when becoming leader. may runs in background tasks. void initLeaderStatus(); - Poco::Logger * log = &Poco::Logger::get("CnchServerManager"); + LoggerPtr log = getLogger("CnchServerManager"); BackgroundSchedulePool::TaskHolder topology_refresh_task; BackgroundSchedulePool::TaskHolder lease_renew_task; diff --git a/src/MergeTreeCommon/CnchStorageCommon.cpp b/src/MergeTreeCommon/CnchStorageCommon.cpp index e0800c45d3..58286c3a86 100644 --- a/src/MergeTreeCommon/CnchStorageCommon.cpp +++ b/src/MergeTreeCommon/CnchStorageCommon.cpp @@ -96,7 +96,7 @@ bool CnchStorageCommonHelper::healthCheckForWorkerGroup(ContextPtr context, Work catch (const NetException &) { remove_marks[i] = 1; - LOG_INFO(&Poco::Logger::get("CnchStorageCommonHelper"), "Unhealthy worker {} is skipped.", + LOG_INFO(getLogger("CnchStorageCommonHelper"), "Unhealthy worker {} is skipped.", (worker_group->getHostWithPortsVec()[i]).id); } }); @@ -335,7 +335,7 @@ String CnchStorageCommonHelper::getCreateQueryForCloudTable( formatAST(create_query, statement_buf, false); writeChar('\n', statement_buf); LOG_TRACE( - &Poco::Logger::get("getCreateQueryForCloudTable"), "create query for cloud table is {}", statement_buf.str()); + getLogger("getCreateQueryForCloudTable"), "create query for cloud table is {}", statement_buf.str()); return statement_buf.str(); } @@ -373,7 +373,7 @@ bool CnchStorageCommonHelper::forwardQueryToServerIfNeeded(ContextPtr query_cont query = query_status.query; } LOG_DEBUG( - &Poco::Logger::get("CnchStorageCommonHelper"), "Send query `{}` to server {}", query, host_port.toDebugString()); + getLogger("CnchStorageCommonHelper"), "Send query `{}` to server {}", query, host_port.toDebugString()); RemoteBlockInputStream stream(connection, query, {}, 
query_context); NullBlockOutputStream output({}); diff --git a/src/MergeTreeCommon/CnchTopologyMaster.h b/src/MergeTreeCommon/CnchTopologyMaster.h index 1be8e78084..b9cf092fd1 100644 --- a/src/MergeTreeCommon/CnchTopologyMaster.h +++ b/src/MergeTreeCommon/CnchTopologyMaster.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -58,7 +59,7 @@ private: bool allow_empty_result, bool allow_tso_unavailable); - Poco::Logger * log = &Poco::Logger::get("CnchTopologyMaster"); + LoggerPtr log = getLogger("CnchTopologyMaster"); BackgroundSchedulePool::TaskHolder topology_fetcher; std::list topologies; const Settings settings; diff --git a/src/MergeTreeCommon/GlobalGCManager.cpp b/src/MergeTreeCommon/GlobalGCManager.cpp index 11ee92ab3e..c012e6094c 100644 --- a/src/MergeTreeCommon/GlobalGCManager.cpp +++ b/src/MergeTreeCommon/GlobalGCManager.cpp @@ -40,7 +40,7 @@ GlobalGCManager::GlobalGCManager( size_t default_max_threads, size_t default_max_free_threads, size_t default_max_queue_size) - : WithContext(global_context_), log(&Poco::Logger::get("GlobalGCManager")) + : WithContext(global_context_), log(getLogger("GlobalGCManager")) { const auto & config_ref = getContext()->getConfigRef(); this->max_threads = @@ -94,7 +94,7 @@ size_t amountOfWorkCanReceive(size_t max_threads, size_t deleting_table_num) } namespace { - void cleanS3Disks(const StoragePtr & storage, const MergeTreeMetaBase & mergetree_meta, const Context & context, Poco::Logger * log) + void cleanS3Disks(const StoragePtr & storage, const MergeTreeMetaBase & mergetree_meta, const Context & context, LoggerPtr log) { auto catalog = context.getCnchCatalog(); Strings partition_ids = catalog->getPartitionIDs(storage, &context); @@ -157,7 +157,7 @@ namespace { clean_pool.wait(); } -void cleanDisks(const Disks & disks, const String & relative_path, Poco::Logger * log) +void cleanDisks(const Disks & disks, const String & relative_path, LoggerPtr log) { for (const DiskPtr & disk : disks) { @@ -256,7 +256,7 
@@ std::optional getCleanableTrashTable( return snapshot->commit_time() + TxnTimestamp::fromUnixTimestamp(snapshot->ttl_in_days() * 3600 * 24) < ts; }); - auto * log = &Poco::Logger::get("getCleanableTrashTable"); + auto log = getLogger("getCleanableTrashTable"); for (const auto & [beg, end] : lifespans) { LOG_TRACE(log, "lifespan [{} - {})", beg, end); @@ -275,7 +275,7 @@ std::optional getCleanableTrashTable( return table_versions.back(); } -bool executeGlobalGC(const Protos::DataModelTable & table, const Context & context, Poco::Logger * log) +bool executeGlobalGC(const Protos::DataModelTable & table, const Context & context, LoggerPtr log) { auto storage_id = StorageID{table.database(), table.name(), RPCHelpers::createUUID(table.uuid())}; @@ -534,7 +534,7 @@ bool GlobalGCManager::schedule(std::vector tables) return true; } -void GlobalGCManager::systemCleanTrash(ContextPtr local_context, StorageID storage_id, Poco::Logger * log) +void GlobalGCManager::systemCleanTrash(ContextPtr local_context, StorageID storage_id, LoggerPtr log) { const UInt64 retention_sec = local_context->getSettingsRef().cnch_data_retention_time_in_sec; auto catalog = local_context->getCnchCatalog(); diff --git a/src/MergeTreeCommon/GlobalGCManager.h b/src/MergeTreeCommon/GlobalGCManager.h index e152ddd91c..c4f217c084 100644 --- a/src/MergeTreeCommon/GlobalGCManager.h +++ b/src/MergeTreeCommon/GlobalGCManager.h @@ -13,6 +13,7 @@ * limitations under the License. 
*/ +#include #include #include #include @@ -41,8 +42,8 @@ std::optional getCleanableTrashTable( UInt64 retention_sec, String * fail_reason = nullptr); -using GlobalGCExecuter = std::function; -bool executeGlobalGC(const Protos::DataModelTable & table, const Context & context, Poco::Logger * log); +using GlobalGCExecuter = std::function; +bool executeGlobalGC(const Protos::DataModelTable & table, const Context & context, LoggerPtr log); size_t calculateApproximateWorkLimit(size_t max_threads); bool canReceiveMoreWork(size_t max_threads, size_t deleting_table_num, size_t num_of_new_tables); @@ -90,7 +91,7 @@ public: std::set getDeletingUUIDs() const; bool isShutdown() const; - static void systemCleanTrash(ContextPtr local_context, StorageID storage_id, Poco::Logger * log); + static void systemCleanTrash(ContextPtr local_context, StorageID storage_id, LoggerPtr log); private: bool scheduleImpl(std::vector && tables); @@ -101,7 +102,7 @@ private: std::set deleting_uuids; std::unique_ptr threadpool; bool is_shutdown = false; - Poco::Logger * log; + LoggerPtr log; GlobalGCHelpers::GlobalGCExecuter executor = GlobalGCHelpers::executeGlobalGC; }; diff --git a/src/MergeTreeCommon/MergeTreeDataDeduper.cpp b/src/MergeTreeCommon/MergeTreeDataDeduper.cpp index 8ca93a6922..3075a1e87c 100644 --- a/src/MergeTreeCommon/MergeTreeDataDeduper.cpp +++ b/src/MergeTreeCommon/MergeTreeDataDeduper.cpp @@ -45,7 +45,7 @@ using IndexFileIterators = std::vector; MergeTreeDataDeduper::MergeTreeDataDeduper( const MergeTreeMetaBase & data_, ContextPtr context_, const CnchDedupHelper::DedupMode & dedup_mode_) - : data(data_), context(context_), log(&Poco::Logger::get(data_.getLogName() + " (Deduper)")), dedup_mode(dedup_mode_) + : data(data_), context(context_), log(getLogger(data_.getLogName() + " (Deduper)")), dedup_mode(dedup_mode_) { if (data.merging_params.hasExplicitVersionColumn()) version_mode = VersionMode::ExplicitVersion; @@ -223,7 +223,7 @@ namespace return std::move(res); } - void 
dumpBlockForLogging(const Block & block, String block_stage, Poco::Logger * log) + void dumpBlockForLogging(const Block & block, String block_stage, LoggerPtr log) { LOG_DEBUG(log, "{}: {}", block_stage, block.dumpStructure()); for (size_t row_num = 0; row_num < block.rows(); row_num++) diff --git a/src/MergeTreeCommon/MergeTreeDataDeduper.h b/src/MergeTreeCommon/MergeTreeDataDeduper.h index e7dceb23b0..28b56c02c8 100644 --- a/src/MergeTreeCommon/MergeTreeDataDeduper.h +++ b/src/MergeTreeCommon/MergeTreeDataDeduper.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -200,7 +201,7 @@ private: const MergeTreeMetaBase & data; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; VersionMode version_mode; CnchDedupHelper::DedupMode dedup_mode; }; diff --git a/src/MergeTreeCommon/MergeTreeMetaBase.cpp b/src/MergeTreeCommon/MergeTreeMetaBase.cpp index 576b7a82ee..513adafe22 100644 --- a/src/MergeTreeCommon/MergeTreeMetaBase.cpp +++ b/src/MergeTreeCommon/MergeTreeMetaBase.cpp @@ -129,7 +129,7 @@ MergeTreeMetaBase::MergeTreeMetaBase( , require_part_metadata(require_part_metadata_) , broken_part_callback(broken_part_callback_) , log_name(logger_name_) - , log(&Poco::Logger::get(log_name)) + , log(::getLogger(log_name)) , storage_settings(std::move(storage_settings_)) , pinned_part_uuids(std::make_shared()) , data_parts_by_info(data_parts_indexes.get()) @@ -2002,7 +2002,7 @@ ASTPtr MergeTreeMetaBase::applyFilter( { double selectivity = FilterEstimator::estimateFilterSelectivity(storage_statistics, conjunct, names_and_types, query_context); LOG_DEBUG( - &Poco::Logger::get("OptimizerActivePrewhere"), + ::getLogger("OptimizerActivePrewhere"), "conjunct=" + serializeAST(*conjunct) + ", selectivity=" + std::to_string(selectivity)); if (selectivity <= query_context->getSettingsRef().max_active_prewhere_selectivity @@ -2052,7 +2052,7 @@ ASTPtr MergeTreeMetaBase::applyFilter( std::move(column_compressed_sizes), getInMemoryMetadataPtr(), 
current_info.syntax_analyzer_result->requiredSourceColumns(), - &Poco::Logger::get("OptimizerEarlyPrewherePushdown")}; + ::getLogger("OptimizerEarlyPrewherePushdown")}; } } diff --git a/src/MergeTreeCommon/MergeTreeMetaBase.h b/src/MergeTreeCommon/MergeTreeMetaBase.h index aeab234c3e..c196bdb6c8 100644 --- a/src/MergeTreeCommon/MergeTreeMetaBase.h +++ b/src/MergeTreeCommon/MergeTreeMetaBase.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -181,7 +182,7 @@ public: /// Logger const String & getLogName() const { return log_name; } - Poco::Logger * getLogger() const override { return log; } + LoggerPtr getLogger() const { return log; } /// A global unique id for the storage. If storage UUID is not empty, use the storage UUID. Otherwise, use the address of current object. String getStorageUniqueID() const; @@ -482,7 +483,7 @@ protected: String storage_address; String log_name; - Poco::Logger * log; + LoggerPtr log; /// Storage settings. /// Use get and set to receive readonly versions. diff --git a/src/MergeTreeCommon/StorageDataManager.h b/src/MergeTreeCommon/StorageDataManager.h index 6e7e7ed5a4..72645042af 100644 --- a/src/MergeTreeCommon/StorageDataManager.h +++ b/src/MergeTreeCommon/StorageDataManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -41,7 +42,7 @@ private: std::shared_mutex mutex; std::map versions; - Poco::Logger * log = &Poco::Logger::get("StorageDataManager"); + LoggerPtr log = getLogger("StorageDataManager"); }; using StorageDataManagerPtr = std::shared_ptr; diff --git a/src/MergeTreeCommon/TableVersion.cpp b/src/MergeTreeCommon/TableVersion.cpp index b4d430c13d..7cb8bd1274 100644 --- a/src/MergeTreeCommon/TableVersion.cpp +++ b/src/MergeTreeCommon/TableVersion.cpp @@ -278,7 +278,7 @@ void TableVersion::dropDiskCache(ThreadPool & pool) } catch(...) 
{ - tryLogCurrentException(&Poco::Logger::get("TableVersion"), "Error occurs when drop manifest disk cache : " + segment_name); + tryLogCurrentException(getLogger("TableVersion"), "Error occurs when drop manifest disk cache : " + segment_name); } }; diff --git a/src/MergeTreeCommon/TableVersion.h b/src/MergeTreeCommon/TableVersion.h index 1be6c7617c..a552148462 100644 --- a/src/MergeTreeCommon/TableVersion.h +++ b/src/MergeTreeCommon/TableVersion.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -62,7 +63,7 @@ private: DataModelPartWrapperVector data_parts; DeleteBitmapMetaPtrVector delete_bitmaps; - Poco::Logger * log = &Poco::Logger::get("TableVersion"); + LoggerPtr log = getLogger("TableVersion"); }; using TableVersionPtr = std::shared_ptr; diff --git a/src/MergeTreeCommon/assignCnchParts.cpp b/src/MergeTreeCommon/assignCnchParts.cpp index fd27f2a636..384d0792b1 100644 --- a/src/MergeTreeCommon/assignCnchParts.cpp +++ b/src/MergeTreeCommon/assignCnchParts.cpp @@ -39,7 +39,7 @@ namespace DB { template -inline void reportStats(Poco::Logger * log, const M & map, const String & name, size_t num_workers) +inline void reportStats(LoggerPtr log, const M & map, const String & name, size_t num_workers) { std::stringstream ss; ss << name << " : "; @@ -76,7 +76,7 @@ template std::unordered_map assignCnchParts std::unordered_map assignCnchParts(const WorkerGroupHandle & worker_group, const DataPartsCnchVector & parts, const ContextPtr & query_context, MergeTreeSettingsPtr settings, std::optional allocator) { - static auto * log = &Poco::Logger::get("assignCnchParts"); + static auto log = getLogger("assignCnchParts"); Context::PartAllocator part_allocation_algorithm = allocator.value_or(query_context->getPartAllocationAlgo(settings)); switch (part_allocation_algorithm) @@ -159,7 +159,7 @@ std::unordered_map assignCnchPartsWithJump( /// 2 round apporach template -std::unordered_map assignCnchPartsWithRingAndBalance(Poco::Logger * log, WorkerList 
worker_ids, const std::unordered_map & worker_hosts, const ConsistentHashRing & ring, const DataPartsCnchVector & parts) +std::unordered_map assignCnchPartsWithRingAndBalance(LoggerPtr log, WorkerList worker_ids, const std::unordered_map & worker_hosts, const ConsistentHashRing & ring, const DataPartsCnchVector & parts) { LOG_INFO(log, "Consistent Hash: Start to allocate part with bounded ring based hash policy."); std::unordered_map ret; @@ -214,7 +214,7 @@ std::unordered_map assignCnchPartsWithRingAndBalanc // 1 round approach template -static std::unordered_map assignCnchPartsWithStrictBoundedHash(Poco::Logger * log, WorkerList worker_ids, const std::unordered_map & worker_hosts, const ConsistentHashRing & ring, const DataPartsCnchVector & parts, bool strict) +static std::unordered_map assignCnchPartsWithStrictBoundedHash(LoggerPtr log, WorkerList worker_ids, const std::unordered_map & worker_hosts, const ConsistentHashRing & ring, const DataPartsCnchVector & parts, bool strict) { LOG_DEBUG(log, "Strict Bounded Consistent Hash: Start to allocate part with bounded ring based hash policy under strict mode " + std::to_string(strict) + "."); std::unordered_map ret; @@ -397,7 +397,7 @@ size_t computeVirtualPartSize(size_t min_rows_per_vp, size_t index_granularity) } static std::pair assignCnchHybridPartsWithMod( - Poco::Logger * log, const WorkerGroupHandle & worker_group, const ServerDataPartsVector & parts, size_t virtual_part_size /* unit = num marks */) + LoggerPtr log, const WorkerGroupHandle & worker_group, const ServerDataPartsVector & parts, size_t virtual_part_size /* unit = num marks */) { std::pair res; auto & physical_assignment = res.first; @@ -523,7 +523,7 @@ static std::pair assignCnchHybrid } static std::pair assignCnchHybridPartsWithStrictBoundedHash( - Poco::Logger * log, + LoggerPtr log, const WorkerGroupHandle & worker_group, const ServerDataPartsVector & parts, size_t virtual_part_size /* unit = num marks */, @@ -588,7 +588,7 @@ static std::pair 
assignCnchHybrid } static std::pair assignCnchHybridPartsWithConsistentHash( - Poco::Logger * log, const WorkerGroupHandle & worker_group, const ServerDataPartsVector & parts, size_t virtual_part_size /* unit = num marks */) + LoggerPtr log, const WorkerGroupHandle & worker_group, const ServerDataPartsVector & parts, size_t virtual_part_size /* unit = num marks */) { const auto & ring = worker_group->getRing(); std::pair res; @@ -700,7 +700,7 @@ void mergeConsecutiveRanges(VirtualPartAssignmentMap & virtual_part_assignment) } } -static void reportHybridAllocStats(Poco::Logger * log, ServerAssignmentMap & physical_parts, VirtualPartAssignmentMap & virtual_parts, const String & name) +static void reportHybridAllocStats(LoggerPtr log, ServerAssignmentMap & physical_parts, VirtualPartAssignmentMap & virtual_parts, const String & name) { std::unordered_map allocated_marks; for (const auto & physical_part : physical_parts) @@ -770,7 +770,7 @@ ServerVirtualPartVector getVirtualPartVector(const ServerDataPartsVector & parts std::pair assignCnchHybridParts( const WorkerGroupHandle & worker_group, const ServerDataPartsVector & parts, size_t virtual_part_size /* unit = num marks */, const ContextPtr & query_context) { - static auto * log = &Poco::Logger::get("assignCnchHybridParts"); + static auto log = getLogger("assignCnchHybridParts"); // If part_allocation_algorithm is specified in SQL Level, use it with high priority Context::HybridPartAllocator part_allocation_algorithm; if (query_context->getSettingsRef().cnch_hybrid_part_allocation_algorithm.changed) diff --git a/src/MergeTreeCommon/tests/gtest_global_gc.cpp b/src/MergeTreeCommon/tests/gtest_global_gc.cpp index 4187a1f979..c0a199c6e9 100644 --- a/src/MergeTreeCommon/tests/gtest_global_gc.cpp +++ b/src/MergeTreeCommon/tests/gtest_global_gc.cpp @@ -19,7 +19,7 @@ namespace GTEST_GLOBAL_GC { std::mutex mutex1; // to hang executeGlobalGCDummy1 -bool executeGlobalGCDummy1(const Protos::DataModelTable & /*table*/, const 
Context &, Poco::Logger *) +bool executeGlobalGCDummy1(const Protos::DataModelTable & /*table*/, const Context &, LoggerPtr) { std::lock_guard lock(mutex1); //std::cout << "executed table:" << UUIDHelpers::UUIDToString(RPCHelpers::createUUID(table.uuid())) << "\n"; @@ -352,7 +352,7 @@ TEST(GlobalGCManager, normal_operation_test_full_queue) global_gc.shutdown(); } -bool executeGlobalGCDummy2(const Protos::DataModelTable & /*table*/, const Context &, Poco::Logger *) +bool executeGlobalGCDummy2(const Protos::DataModelTable & /*table*/, const Context &, LoggerPtr) { sleep(1); return true; diff --git a/src/Optimizer/CardinalityEstimate/BNCardEstimator/BNModelManager.cpp b/src/Optimizer/CardinalityEstimate/BNCardEstimator/BNModelManager.cpp index e3e353cfaa..8fc40db411 100644 --- a/src/Optimizer/CardinalityEstimate/BNCardEstimator/BNModelManager.cpp +++ b/src/Optimizer/CardinalityEstimate/BNCardEstimator/BNModelManager.cpp @@ -122,7 +122,7 @@ void BNModelManager::fetchImpl() #endif stats = fetchLocalImpl(); LOG_INFO( - &Poco::Logger::get("BNModelManagerFetcher"), + getLogger("BNModelManagerFetcher"), "Fetching task runs in {} ms, scan total {} models, having {} reloaded models and {} exception", watch.elapsedMilliseconds(), stats.total, @@ -134,7 +134,7 @@ BNModelManager::FetchStats BNModelManager::fetchLocalImpl(const StorageID & tabl { if (!fs::exists(directory)) { - LOG_WARNING(&Poco::Logger::get("BNModelManagerFetcher"), "Directory {} doesn't exists, skip this run", directory); + LOG_WARNING(getLogger("BNModelManagerFetcher"), "Directory {} doesn't exists, skip this run", directory); return {}; } int total = 0, reloaded = 0, exception = 0; @@ -164,13 +164,13 @@ BNModelManager::FetchStats BNModelManager::fetchLocalImpl(const StorageID & tabl if (updateModel(storage_id, readFileFromLocal(entry.path()), readFileFromLocal(meta_path), last_write_time)) ++reloaded; - LOG_INFO(&Poco::Logger::get("BNModelManagerFetcher"), "Load BN Models: {}", 
storage_id.getFullNameNotQuoted()); + LOG_INFO(getLogger("BNModelManagerFetcher"), "Load BN Models: {}", storage_id.getFullNameNotQuoted()); } catch (const Exception & e) { ++exception; LOG_WARNING( - &Poco::Logger::get("BNModelManagerFetcher"), + getLogger("BNModelManagerFetcher"), "Having exception <{}> while loading model {}, skip this run", e.what(), entry.path().string()); @@ -179,7 +179,7 @@ BNModelManager::FetchStats BNModelManager::fetchLocalImpl(const StorageID & tabl { ++exception; LOG_WARNING( - &Poco::Logger::get("BNModelManagerFetcher"), + getLogger("BNModelManagerFetcher"), "Having exception <{}> while loading model {}, skip this run", e.what(), entry.path().string()); @@ -188,7 +188,7 @@ BNModelManager::FetchStats BNModelManager::fetchLocalImpl(const StorageID & tabl { ++exception; LOG_WARNING( - &Poco::Logger::get("BNModelManagerFetcher"), + getLogger("BNModelManagerFetcher"), "Having unknown exception while loading model {}, skip this run", entry.path().string()); } @@ -208,7 +208,7 @@ BNModelManager::FetchStats BNModelManager::fetchHdfsImpl(const StorageID & table } if (!hdfs_fs->exists(directory)) { - LOG_WARNING(&Poco::Logger::get("BNModelManagerFetcher"), "Directory {} doesn't exists, skip this run", directory); + LOG_WARNING(getLogger("BNModelManagerFetcher"), "Directory {} doesn't exists, skip this run", directory); return {}; } @@ -237,13 +237,13 @@ BNModelManager::FetchStats BNModelManager::fetchHdfsImpl(const StorageID & table if (updateModel(storage_id, readFileFromHdfs(json_path.replace_extension(".bifxml")), readFileFromHdfs(json_path.replace_extension(".json")), last_write_time)) ++reloaded; - LOG_INFO(&Poco::Logger::get("BNModelManagerFetcher"), "Load BN Models: {}", storage_id.getFullNameNotQuoted()); + LOG_INFO(getLogger("BNModelManagerFetcher"), "Load BN Models: {}", storage_id.getFullNameNotQuoted()); } catch (const Exception & e) { ++exception; LOG_WARNING( - &Poco::Logger::get("BNModelManagerFetcher"), + 
getLogger("BNModelManagerFetcher"), "Having exception <{}> while loading model {}, skip this run", e.what(), file_name); @@ -252,7 +252,7 @@ BNModelManager::FetchStats BNModelManager::fetchHdfsImpl(const StorageID & table { ++exception; LOG_WARNING( - &Poco::Logger::get("BNModelManagerFetcher"), + getLogger("BNModelManagerFetcher"), "Having exception <{}> while loading model {}, skip this run", e.what(), file_name); @@ -261,7 +261,7 @@ BNModelManager::FetchStats BNModelManager::fetchHdfsImpl(const StorageID & table { ++exception; LOG_WARNING( - &Poco::Logger::get("BNModelManagerFetcher"), "Having unknown exception while loading model {}, skip this run", file_name); + getLogger("BNModelManagerFetcher"), "Having unknown exception while loading model {}, skip this run", file_name); } } @@ -382,7 +382,7 @@ void BNModelManager::updateRowNumbers() for (const auto & part : parts) model.second.row_number += part->rows_count; - LOG_INFO(&Poco::Logger::get("BNModelManagerUpdater"), "Update row number of table {}: {}", + LOG_INFO(getLogger("BNModelManagerUpdater"), "Update row number of table {}: {}", model.first.getFullNameNotQuoted(), model.second.row_number); } @@ -402,7 +402,7 @@ void BNModelManager::fetchSync(const StorageID & table_id) auto end = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast(end - start); LOG_INFO( - &Poco::Logger::get("BNModelManagerFetcher"), "Fetch task run in {} ms, reloaded: {}", duration.count(), stats.reloaded); + getLogger("BNModelManagerFetcher"), "Fetch task run in {} ms, reloaded: {}", duration.count(), stats.reloaded); } } diff --git a/src/Optimizer/CardinalityEstimate/TableScanEstimator.cpp b/src/Optimizer/CardinalityEstimate/TableScanEstimator.cpp index 7cc92d2161..34dfe8ad72 100644 --- a/src/Optimizer/CardinalityEstimate/TableScanEstimator.cpp +++ b/src/Optimizer/CardinalityEstimate/TableScanEstimator.cpp @@ -100,7 +100,7 @@ std::optional TableScanEstimator::estimate( } catch(...) 
{ - auto * logger = &Poco::Logger::get("TableScanEstimator"); + auto logger = getLogger("TableScanEstimator"); tryLogCurrentException(logger); return std::nullopt; } diff --git a/src/Optimizer/Cascades/CascadesOptimizer.cpp b/src/Optimizer/Cascades/CascadesOptimizer.cpp index 38fa5a8fee..b6b36f09c0 100644 --- a/src/Optimizer/Cascades/CascadesOptimizer.cpp +++ b/src/Optimizer/Cascades/CascadesOptimizer.cpp @@ -259,7 +259,7 @@ CascadesContext::CascadesContext( , enable_cbo(enable_cbo_ && context->getSettingsRef().enable_cbo) , max_join_size(max_join_size_) , cost_model(CostModel(*context_)) - , log(&Poco::Logger::get("CascadesOptimizer")) + , log(getLogger("CascadesOptimizer")) { LOG_DEBUG(log, "max join size: {}", max_join_size_); LOG_DEBUG(log, "worker size: {}", worker_size_); diff --git a/src/Optimizer/Cascades/CascadesOptimizer.h b/src/Optimizer/Cascades/CascadesOptimizer.h index 7ffd84c459..a6124047bd 100644 --- a/src/Optimizer/Cascades/CascadesOptimizer.h +++ b/src/Optimizer/Cascades/CascadesOptimizer.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -73,7 +74,7 @@ public: TaskStack & getTaskStack() { return task_stack; } Memo & getMemo() { return memo; } size_t getWorkerSize() const { return worker_size; } - Poco::Logger * getLog() const { return log; } + LoggerPtr getLog() const { return log; } bool isSupportFilter() const { return support_filter; } CTEInfo & getCTEInfo() { return cte_info; } CTEDefPropertyRequirements & getCTEDefPropertyRequirements() { return cte_property_requirements; } @@ -121,7 +122,7 @@ private: }; std::unordered_map> rule_trace; - Poco::Logger * log; + LoggerPtr log; }; class OptimizationContext diff --git a/src/Optimizer/Cascades/Task.h b/src/Optimizer/Cascades/Task.h index 2ecc54ba68..2c90af7468 100644 --- a/src/Optimizer/Cascades/Task.h +++ b/src/Optimizer/Cascades/Task.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -75,7 +76,7 @@ public: protected: OptContextPtr context; - 
Poco::Logger * log; + LoggerPtr log; }; class OptimizeGroup : public OptimizerTask diff --git a/src/Optimizer/CostModel/JoinCost.cpp b/src/Optimizer/CostModel/JoinCost.cpp index fc1b22821b..8e14c34228 100644 --- a/src/Optimizer/CostModel/JoinCost.cpp +++ b/src/Optimizer/CostModel/JoinCost.cpp @@ -74,7 +74,7 @@ PlanNodeCost JoinCost::calculate(const JoinStep & step, CostContext & context) : PlanNodeCost::cpuCost(right_stats->getOutputSizeInBytes()) ; PlanNodeCost join_cpu_cost = join_stats ? PlanNodeCost::cpuCost(join_stats->getOutputSizeInBytes()) : PlanNodeCost::ZERO; - // Poco::Logger * log = &Poco::Logger::get("JoinCost"); + // LoggerPtr log = getLogger("JoinCost"); // LOG_DEBUG(log, "left {} avg {} right {} join key {} join type {} ", left_cpu_cost.getCpuValue(), getAvgProbeCost(step, context), right_cpu_cost.getCpuValue(), step.getLeftKeys()[0], step.getDistributionType() == DistributionType::REPARTITION); return (left_cpu_cost * context.cost_model.getJoinProbeSideCostWeight() + right_cpu_cost * context.cost_model.getJoinBuildSideCostWeight() + join_cpu_cost) diff --git a/src/Optimizer/DataDependency/FunctionalDependency.cpp b/src/Optimizer/DataDependency/FunctionalDependency.cpp index 0423df7cd3..e9c4e1f978 100644 --- a/src/Optimizer/DataDependency/FunctionalDependency.cpp +++ b/src/Optimizer/DataDependency/FunctionalDependency.cpp @@ -24,9 +24,9 @@ NameSet FunctionalDependencies::simplify(NameSet srcs) const std::string str; for (const auto & name : srcs) str += name + ","; - LOG_INFO(&Poco::Logger::get("DataDependency"), "FunctionalDependencies::simplify srcs -- " + str); + LOG_INFO(getLogger("DataDependency"), "FunctionalDependencies::simplify srcs -- " + str); - // LOG_INFO(&Poco::Logger::get("DataDependency"), "FDS: " + string()); + // LOG_INFO(getLogger("DataDependency"), "FDS: " + string()); for (const auto & [determinant, dependents] : dependencies) { diff --git a/src/Optimizer/Dump/DDLDumper.h b/src/Optimizer/Dump/DDLDumper.h index 
7cca8acaf5..8cd6d01afb 100644 --- a/src/Optimizer/Dump/DDLDumper.h +++ b/src/Optimizer/Dump/DDLDumper.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -61,7 +62,7 @@ private: std::unordered_map shard_counts; std::unordered_set visited_tables; DumpUtils::DumpSettings settings; - const Poco::Logger * log = &Poco::Logger::get("DDLDumper"); + const LoggerPtr log = getLogger("DDLDumper"); }; } diff --git a/src/Optimizer/Dump/PlanReproducer.h b/src/Optimizer/Dump/PlanReproducer.h index cc83a09a0d..86b7251709 100644 --- a/src/Optimizer/Dump/PlanReproducer.h +++ b/src/Optimizer/Dump/PlanReproducer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -81,7 +82,7 @@ private: Poco::JSON::Object::Ptr stats; std::optional cluster; ContextMutablePtr latest_context; - const Poco::Logger * log = &Poco::Logger::get("PlanReproducer"); + const LoggerPtr log = getLogger("PlanReproducer"); }; diff --git a/src/Optimizer/Dump/StatsLoader.h b/src/Optimizer/Dump/StatsLoader.h index b7c565a596..729f73e0cd 100644 --- a/src/Optimizer/Dump/StatsLoader.h +++ b/src/Optimizer/Dump/StatsLoader.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -31,7 +32,7 @@ private: Poco::JSON::Object::Ptr stats_json); const std::string json_file_path; Statistics::CatalogAdaptorPtr stats_catalog; - const Poco::Logger * log = &Poco::Logger::get("StatsLoader"); + const LoggerPtr log = getLogger("StatsLoader"); Poco::JSON::Object::Ptr stats_json; }; } diff --git a/src/Optimizer/IntermediateResult/CacheParamBuilder.cpp b/src/Optimizer/IntermediateResult/CacheParamBuilder.cpp index 64295b71a2..6f5ebc1032 100644 --- a/src/Optimizer/IntermediateResult/CacheParamBuilder.cpp +++ b/src/Optimizer/IntermediateResult/CacheParamBuilder.cpp @@ -122,7 +122,7 @@ size_t CacheParamBuilder::computeAggregatingHash(std::shared_ptrgetName(), new_step->hash(), json_msg); + // LOG_DEBUG(getLogger("AddCache"), " {} hash: {}\n json: {}", new_step->getName(), new_step->hash(), 
json_msg); return new_step->hash(); } diff --git a/src/Optimizer/IntermediateResult/CacheableChecker.h b/src/Optimizer/IntermediateResult/CacheableChecker.h index 949723a529..9767d816c2 100644 --- a/src/Optimizer/IntermediateResult/CacheableChecker.h +++ b/src/Optimizer/IntermediateResult/CacheableChecker.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -83,7 +84,7 @@ namespace CacheableChecker private: const RuntimeFilterBuildsAndProbes query_runtime_filter_info; ContextPtr context; - Poco::Logger * log = &Poco::Logger::get("RuntimeFilterCacheableChecker"); + LoggerPtr log = getLogger("RuntimeFilterCacheableChecker"); // add the rtf to checked if cachable; also add extra rtfs to unchecked_ids bool checkRuntimeFilterId(RuntimeFilterId id, diff --git a/src/Optimizer/MaterializedView/MaterializedViewMemoryCache.cpp b/src/Optimizer/MaterializedView/MaterializedViewMemoryCache.cpp index 35578b26ca..57aa3cf992 100644 --- a/src/Optimizer/MaterializedView/MaterializedViewMemoryCache.cpp +++ b/src/Optimizer/MaterializedView/MaterializedViewMemoryCache.cpp @@ -115,7 +115,7 @@ MaterializedViewMemoryCache::getMaterializedViewStructure( } catch (Exception & exception) { - static auto * log = &Poco::Logger::get("MaterializedViewRewriter"); + static auto log = getLogger("MaterializedViewRewriter"); if (exception.code() == ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW) LOG_DEBUG(log, "skip {}, reason: {}", materialized_view_id.getFullTableName(), exception.message()); else diff --git a/src/Optimizer/PlanOptimizer.cpp b/src/Optimizer/PlanOptimizer.cpp index 6f65624131..966982ffbf 100644 --- a/src/Optimizer/PlanOptimizer.cpp +++ b/src/Optimizer/PlanOptimizer.cpp @@ -550,7 +550,7 @@ void PlanOptimizer::optimize(QueryPlan & plan, ContextMutablePtr context) total_watch.restart(); PlanCheck::checkFinalPlan(plan, context); - context->logOptimizerProfile(&Poco::Logger::get("PlanOptimizer"), + context->logOptimizerProfile(getLogger("PlanOptimizer"), 
"Optimizer stage run time: ", "checkFinalPlan", std::to_string(total_watch.elapsedMillisecondsAsDouble()) + "ms", true); @@ -578,6 +578,6 @@ void PlanOptimizer::optimize(QueryPlan & plan, ContextMutablePtr context, const } UInt64 elapsed = total_watch.elapsedMilliseconds(); - LOG_DEBUG(&Poco::Logger::get("PlanOptimizer"), "Total optimizer time: " + std::to_string(elapsed)); + LOG_DEBUG(getLogger("PlanOptimizer"), "Total optimizer time: " + std::to_string(elapsed)); } } diff --git a/src/Optimizer/QueryUseOptimizerChecker.cpp b/src/Optimizer/QueryUseOptimizerChecker.cpp index 9109e82bb5..fbc38c7ced 100644 --- a/src/Optimizer/QueryUseOptimizerChecker.cpp +++ b/src/Optimizer/QueryUseOptimizerChecker.cpp @@ -171,7 +171,7 @@ bool QueryUseOptimizerChecker::check(ASTPtr node, ContextMutablePtr context, boo if (!support) { LOG_INFO( - &Poco::Logger::get("QueryUseOptimizerChecker"), "query is unsupported for optimizer, reason: " + checker.getReason()); + getLogger("QueryUseOptimizerChecker"), "query is unsupported for optimizer, reason: " + checker.getReason()); reason = checker.getReason(); } } @@ -198,7 +198,7 @@ bool QueryUseOptimizerChecker::check(ASTPtr node, ContextMutablePtr context, boo } LOG_DEBUG( - &Poco::Logger::get("QueryUseOptimizerChecker"), + getLogger("QueryUseOptimizerChecker"), fmt::format("support: {}, check: {}", support, check(insert_query->select, context))); if (support) support = check(insert_query->select, context, throw_exception); diff --git a/src/Optimizer/Rewriter/AddBufferForDeadlockCTE.cpp b/src/Optimizer/Rewriter/AddBufferForDeadlockCTE.cpp index 0d9dcd16af..dae3f0324a 100644 --- a/src/Optimizer/Rewriter/AddBufferForDeadlockCTE.cpp +++ b/src/Optimizer/Rewriter/AddBufferForDeadlockCTE.cpp @@ -58,7 +58,7 @@ namespace class FindDirectRightVisitor : public PlanNodeVisitor { public: - explicit FindDirectRightVisitor(CTEInfo & cte_info_, Poco::Logger * logger_) : cte_info(cte_info_), logger(logger_) + explicit FindDirectRightVisitor(CTEInfo & 
cte_info_, LoggerPtr logger_) : cte_info(cte_info_), logger(logger_) { } @@ -67,7 +67,7 @@ namespace void visitJoinNode(JoinNode & node, const JoinPath &) override; CTEInfo & cte_info; - Poco::Logger * logger; + LoggerPtr logger; std::unordered_set deadlock_ctes; VisitPath visit_path; }; @@ -110,7 +110,7 @@ namespace { public: AddBufferVisitor( - const std::unordered_set & deadlock_ctes_, ContextMutablePtr context_, CTEInfo & cte_info_, Poco::Logger * logger_) + const std::unordered_set & deadlock_ctes_, ContextMutablePtr context_, CTEInfo & cte_info_, LoggerPtr logger_) : SimplePlanRewriter(std::move(context_), cte_info_), deadlock_ctes(deadlock_ctes_), logger(logger_) { } @@ -118,7 +118,7 @@ namespace PlanNodePtr visitCTERefNode(CTERefNode & node, const Void & c) override; const std::unordered_set & deadlock_ctes; - Poco::Logger * logger; + LoggerPtr logger; }; void FindDirectRightVisitor::visitPlanNode(PlanNodeBase & node, const JoinPath & join_path) @@ -347,7 +347,7 @@ namespace bool AddBufferForDeadlockCTE::rewrite(QueryPlan & plan, ContextMutablePtr context) const { - static auto * logger = &Poco::Logger::get("AddBufferForDeadlockCTE"); + static auto logger = getLogger("AddBufferForDeadlockCTE"); if (plan.getCTEInfo().empty()) return false; diff --git a/src/Optimizer/Rewriter/AddRuntimeFilters.h b/src/Optimizer/Rewriter/AddRuntimeFilters.h index 84fda8b994..c5a0ba1a61 100644 --- a/src/Optimizer/Rewriter/AddRuntimeFilters.h +++ b/src/Optimizer/Rewriter/AddRuntimeFilters.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -72,7 +73,7 @@ private: ContextMutablePtr context; CTEInfo & cte_info; SimpleCTEVisitHelper cte_helper; - Poco::Logger * logger = &Poco::Logger::get("AddRuntimeFilters"); + LoggerPtr logger = getLogger("AddRuntimeFilters"); }; struct RuntimeFilterContext diff --git a/src/Optimizer/Rewriter/BitmapIndexSplitter.cpp b/src/Optimizer/Rewriter/BitmapIndexSplitter.cpp index effef3da8e..52435c0400 100644 --- 
a/src/Optimizer/Rewriter/BitmapIndexSplitter.cpp +++ b/src/Optimizer/Rewriter/BitmapIndexSplitter.cpp @@ -47,7 +47,7 @@ PlanNodePtr createProjection(PlanNodes children, Assignments array_set_check_fun auto projection = std::make_shared(child->getCurrentDataStream(), assignments, name_to_type, false, true); LOG_DEBUG( - &Poco::Logger::get("createProjection"), + getLogger("createProjection"), fmt::format( "projection input: {}, output: {}", projection->getInputStreams()[0].header.dumpStructure(), diff --git a/src/Optimizer/Rewriter/EliminateJoinByForeignKey.cpp b/src/Optimizer/Rewriter/EliminateJoinByForeignKey.cpp index e44c68fb85..a17488f0d3 100644 --- a/src/Optimizer/Rewriter/EliminateJoinByForeignKey.cpp +++ b/src/Optimizer/Rewriter/EliminateJoinByForeignKey.cpp @@ -127,7 +127,7 @@ bool EliminateJoinByFK::rewrite(QueryPlan & plan, ContextMutablePtr context) con ostr << "winner_bottom_joins: "; for (const auto & bottom_join : winner.second.bottom_joins) ostr << bottom_join.first->getId() << ", "; - LOG_INFO(&Poco::Logger::get("DataDependency"), "EliminateJoinByFK-JoinInfo. " + ostr.str()); + LOG_INFO(getLogger("DataDependency"), "EliminateJoinByFK-JoinInfo. " + ostr.str()); EliminateJoinByFK::Eliminator eliminator{context, plan.getCTEInfo(), winner, info}; JoinEliminationContext c; @@ -145,7 +145,7 @@ FPKeysAndOrdinaryKeys EliminateJoinByFK::Rewriter::visitPlanNode(PlanNodeBase & FPKeysAndOrdinaryKeys translated = VisitorUtil::accept(node.getChildren()[0], *this, join_info); - // LOG_INFO(&Poco::Logger::get("DataDependency"), "visitPlanNode=" + std::to_string(node.getId()) + ", winners=" + std::to_string(join_info.getWinners().size()) + ". " + translated.keysStr()); + // LOG_INFO(getLogger("DataDependency"), "visitPlanNode=" + std::to_string(node.getId()) + ", winners=" + std::to_string(join_info.getWinners().size()) + ". 
" + translated.keysStr()); return translated.clearFPKeys(); } @@ -211,7 +211,7 @@ FPKeysAndOrdinaryKeys EliminateJoinByFK::Rewriter::visitJoinNode(JoinNode & node translated.fp_keys = common_fp_keys; } - // LOG_INFO(&Poco::Logger::get("DataDependency"), "visitJoinNode=" + std::to_string(node.getId()) + ", winners=" + std::to_string(join_info.getWinners().size()) + ". " + translated.keysStr()); + // LOG_INFO(getLogger("DataDependency"), "visitJoinNode=" + std::to_string(node.getId()) + ", winners=" + std::to_string(join_info.getWinners().size()) + ". " + translated.keysStr()); bool is_inner_join = step.getKind() == ASTTableJoin::Kind::Inner; bool is_outer_join = step.isOuterJoin() && step.getKind() != ASTTableJoin::Kind::Full; // only allow left outer/right outer join. @@ -726,7 +726,7 @@ FPKeysAndOrdinaryKeys EliminateJoinByFK::Rewriter::visitUnionNode(UnionNode & no }); } - // LOG_INFO(&Poco::Logger::get("DataDependency"), "visitPlanNode=" + std::to_string(node.getId()) + ", winners=" + std::to_string(join_info.getWinners().size()) + ". " + result.keysStr()); + // LOG_INFO(getLogger("DataDependency"), "visitPlanNode=" + std::to_string(node.getId()) + ", winners=" + std::to_string(join_info.getWinners().size()) + ". " + result.keysStr()); std::unordered_map old_winners = join_info.reset(invalid_tables, join_infos); collectEliminableJoin(old_winners); diff --git a/src/Optimizer/Rewriter/GroupByKeysPruning.cpp b/src/Optimizer/Rewriter/GroupByKeysPruning.cpp index 0f96f3ae4e..a72aa3eeb5 100644 --- a/src/Optimizer/Rewriter/GroupByKeysPruning.cpp +++ b/src/Optimizer/Rewriter/GroupByKeysPruning.cpp @@ -92,7 +92,7 @@ PlanAndDataDependencyWithConstants GroupByKeysPruning::Rewriter::visitAggregatin std::string str; for (const auto & name : simplified_agg_keys) str += name + ","; - LOG_INFO(&Poco::Logger::get("DataDependency"), "after GroupByKeysPruning by functional dependecy, new_agg_keys -- " + str + ". 
note we don't remove unused keys, but just add it to keys_not_hashed"); + LOG_INFO(getLogger("DataDependency"), "after GroupByKeysPruning by functional dependecy, new_agg_keys -- " + str + ". note we don't remove unused keys, but just add it to keys_not_hashed"); } auto node_ptr = node.shared_from_this(); @@ -137,7 +137,7 @@ PlanAndDataDependencyWithConstants GroupByKeysPruning::Rewriter::visitAggregatin std::string str; for (const auto & name : new_agg_keys) str += name + ","; - LOG_INFO(&Poco::Logger::get("DataDependency"), "after GroupByKeysPruning by constants, new_agg_keys -- " + str); + LOG_INFO(getLogger("DataDependency"), "after GroupByKeysPruning by constants, new_agg_keys -- " + str); } } diff --git a/src/Optimizer/Rewriter/MaterializedViewRewriter.cpp b/src/Optimizer/Rewriter/MaterializedViewRewriter.cpp index 73cbba3066..ca450d9875 100644 --- a/src/Optimizer/Rewriter/MaterializedViewRewriter.cpp +++ b/src/Optimizer/Rewriter/MaterializedViewRewriter.cpp @@ -216,7 +216,7 @@ public: if (verbose) { - static auto * log = &Poco::Logger::get("CandidatesExplorer"); + static auto log = getLogger("CandidatesExplorer"); for (auto & item : explorer.failure_messages) for (auto & message : item.second) LOG_DEBUG( @@ -1222,7 +1222,7 @@ public: std::map> materialized_views_stats; const bool verbose; SimpleCTEVisitHelper cte_helper; - Poco::Logger * logger = &Poco::Logger::get("CandidatesExplorer"); + LoggerPtr logger = getLogger("CandidatesExplorer"); }; using ASTToStringMap = EqualityASTMap; diff --git a/src/Optimizer/Rewriter/MaterializedViewRewriter.h b/src/Optimizer/Rewriter/MaterializedViewRewriter.h index 27d9340d56..4ec3868b05 100644 --- a/src/Optimizer/Rewriter/MaterializedViewRewriter.h +++ b/src/Optimizer/Rewriter/MaterializedViewRewriter.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -45,6 +46,6 @@ private: LinkedHashMap getRelatedMaterializedViews(QueryPlan & plan, ContextMutablePtr context) const; - Poco::Logger * log = 
&Poco::Logger::get("MaterializedViewRewriter"); + LoggerPtr log = getLogger("MaterializedViewRewriter"); }; } diff --git a/src/Optimizer/Rewriter/PredicatePushdown.cpp b/src/Optimizer/Rewriter/PredicatePushdown.cpp index dacb7d5477..68ab8dd0a3 100644 --- a/src/Optimizer/Rewriter/PredicatePushdown.cpp +++ b/src/Optimizer/Rewriter/PredicatePushdown.cpp @@ -156,7 +156,7 @@ PlanNodePtr PredicateVisitor::visitProjectionNode(ProjectionNode & node, Predica auto pushdown_predicate = PredicateUtils::combineConjuncts(inlined_deterministic_conjuncts); LOG_DEBUG( - &Poco::Logger::get("PredicateVisitor"), + getLogger("PredicateVisitor"), "project node {}, pushdown_predicate : {}", node.getId(), pushdown_predicate->formatForErrorMessage()); @@ -196,7 +196,7 @@ PlanNodePtr PredicateVisitor::visitFilterNode(FilterNode & node, PredicateContex std::pair split_in_filter = FilterStep::splitLargeInValueList(step.getFilter(), limit); LOG_DEBUG( - &Poco::Logger::get("PredicateVisitor"), + getLogger("PredicateVisitor"), "filter node {}, split_in_filter.first : {}, split_in_filter.second : {}", node.getId(), split_in_filter.first->formatForErrorMessage(), @@ -211,7 +211,7 @@ PlanNodePtr PredicateVisitor::visitFilterNode(FilterNode & node, PredicateContex } LOG_DEBUG( - &Poco::Logger::get("PredicateVisitor"), + getLogger("PredicateVisitor"), "filter node {}, pushdown_predicate : {}", node.getId(), predicate->formatForErrorMessage()); @@ -398,7 +398,7 @@ PlanNodePtr PredicateVisitor::visitJoinNode(JoinNode & node, PredicateContext & ASTTableJoin::Kind kind = step->getKind(); LOG_DEBUG( - &Poco::Logger::get("PredicateVisitor"), + getLogger("PredicateVisitor"), "join node {}, inherited_predicate : {}, left effective predicate: {} , right effective predicate: {}, join_predicate : {}", node.getId(), inherited_predicate->formatForErrorMessage(), diff --git a/src/Optimizer/Rewriter/PredicatePushdown.h b/src/Optimizer/Rewriter/PredicatePushdown.h index f309da87df..bd5500baab 100644 --- 
a/src/Optimizer/Rewriter/PredicatePushdown.h +++ b/src/Optimizer/Rewriter/PredicatePushdown.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -91,7 +92,7 @@ private: CTEInfo & cte_info; const std::unordered_map & cte_reference_counts; std::unordered_map>> cte_predicates{}; - Poco::Logger * logger = &Poco::Logger::get("PredicateVisitor"); + LoggerPtr logger = getLogger("PredicateVisitor"); PlanNodePtr process(PlanNodeBase &, PredicateContext &); PlanNodePtr processChild(PlanNodeBase &, PredicateContext &); diff --git a/src/Optimizer/Rewriter/Rewriter.cpp b/src/Optimizer/Rewriter/Rewriter.cpp index 6e05e3de8c..1048ec9860 100644 --- a/src/Optimizer/Rewriter/Rewriter.cpp +++ b/src/Optimizer/Rewriter/Rewriter.cpp @@ -17,11 +17,11 @@ void Rewriter::rewritePlan(QueryPlan & plan, ContextMutablePtr context) const double duration = watch.elapsedMillisecondsAsDouble(); context->logOptimizerProfile( - &Poco::Logger::get("PlanOptimizer"), "Optimizer rule run time: ", name(), std::to_string(duration) + "ms", true); + getLogger("PlanOptimizer"), "Optimizer rule run time: ", name(), std::to_string(duration) + "ms", true); if (duration >= context->getSettingsRef().plan_optimizer_rule_warning_time) LOG_WARNING( - &Poco::Logger::get("PlanOptimizer"), + getLogger("PlanOptimizer"), "the execute time of " + name() + " rewriter " + std::to_string(duration) + " ms greater than or equal to " + std::to_string(context->getSettingsRef().plan_optimizer_rule_warning_time) + " ms"); diff --git a/src/Optimizer/Rewriter/ShareCommonExpression.cpp b/src/Optimizer/Rewriter/ShareCommonExpression.cpp index 0f4b144835..0dab9059d2 100644 --- a/src/Optimizer/Rewriter/ShareCommonExpression.cpp +++ b/src/Optimizer/Rewriter/ShareCommonExpression.cpp @@ -501,7 +501,7 @@ namespace PlanNodePtr ShareCommonExpression::rewriteImpl(PlanNodePtr root, ContextMutablePtr context) { assert(root != nullptr); - Poco::Logger * logger = &Poco::Logger::get("ShareCommonExpression"); + LoggerPtr logger = 
getLogger("ShareCommonExpression"); std::vector stack; stack.emplace_back(root.get()); diff --git a/src/Optimizer/Rewriter/UseSortingProperty.h b/src/Optimizer/Rewriter/UseSortingProperty.h index 529aea3a7b..3471771b50 100644 --- a/src/Optimizer/Rewriter/UseSortingProperty.h +++ b/src/Optimizer/Rewriter/UseSortingProperty.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -66,7 +67,7 @@ class PruneSortingInfoRewriter : public SimplePlanRewriter { public: PruneSortingInfoRewriter(ContextMutablePtr context_, CTEInfo & cte_info_) - : SimplePlanRewriter(context_, cte_info_), logger(&Poco::Logger::get("PruneSortingInfoRewriter")) + : SimplePlanRewriter(context_, cte_info_), logger(getLogger("PruneSortingInfoRewriter")) { } @@ -79,7 +80,7 @@ public: PlanNodePtr visitTableScanNode(TableScanNode &, SortInfo & required) override; private: - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Optimizer/Rule/Rewrite/EagerAggregation.cpp b/src/Optimizer/Rule/Rewrite/EagerAggregation.cpp index 45e7da9fbb..89f8f819af 100644 --- a/src/Optimizer/Rule/Rewrite/EagerAggregation.cpp +++ b/src/Optimizer/Rule/Rewrite/EagerAggregation.cpp @@ -254,7 +254,7 @@ updateAggS0AndG0(NameSet names_from_one_side, const NameSet & projection_gene_sy } // LOG_WARNING( - // &Poco::Logger::get("test"), + // getLogger("test"), // "names_from_one_side={}, g0={}, new_g0={}, s0={}, new_s0={}", // fmt::join(names_from_one_side, ","), // fmt::join(g0, ","), @@ -278,7 +278,7 @@ static LocalGroupByTargetMap determineBottomJoin( LocalGroupByTargetMap result; // LOG_WARNING( - // &Poco::Logger::get("test"), + // getLogger("test"), // "\tinto determineBottomJoin, init_s0={}, init_g0={}, projection_gene_symbols={}, projection_gene_symbols={}, " // "init_require_output_names_of_join={}", // formatS0(init_s0), @@ -327,7 +327,7 @@ static LocalGroupByTargetMap determineBottomJoin( std::sort(g0.begin(), g0.end()); g0.erase(std::unique(g0.begin(), g0.end()), g0.end()); - // 
LOG_WARNING(&Poco::Logger::get("test"), "collect new local gorup by target, join_id={}, index={}, g0={}, s0={}", join->getId(), index, fmt::join(g0, ","), formatS0(s0)); + // LOG_WARNING(getLogger("test"), "collect new local gorup by target, join_id={}, index={}, g0={}, s0={}", join->getId(), index, fmt::join(g0, ","), formatS0(s0)); result.emplace(join->getId(), LocalGroupByTarget{join, index, s0, g0, join_layer}); return; } @@ -387,7 +387,7 @@ static LocalGroupByTargetMap determineBottomJoin( std::shared_ptr createLocalAggregate(const DataStream & input_stream, const AggregateDescriptions & s0, const Names & g0, const ContextPtr &) { - // LOG_WARNING(&Poco::Logger::get("test"), "create local_agg={}, keys={}", formatS0(s0), fmt::join(g0, ",")); + // LOG_WARNING(getLogger("test"), "create local_agg={}, keys={}", formatS0(s0), fmt::join(g0, ",")); return std::make_shared( input_stream, g0, NameSet{}, s0, GroupingSetsParamsList{}, true); @@ -420,7 +420,7 @@ PlanNodePtr insertLocalAggregate( // String names; // for (const auto & [k, v] : global_argument_name_to_local) // names += "k=" + k + ",v=" + v + " "; - // LOG_WARNING(&Poco::Logger::get("test"), "before insertLocalAggregate, global_argument_name_to_local={}", names); + // LOG_WARNING(getLogger("test"), "before insertLocalAggregate, global_argument_name_to_local={}", names); auto symbol_mapper = SymbolMapper::simpleMapper(global_argument_name_to_local); @@ -613,7 +613,7 @@ PlanNodePtr insertLocalAggregate( bool canAggPushDown(const LocalGroupByTarget & target, RuleContext & context) { LOG_DEBUG( - &Poco::Logger::get("test"), + getLogger("test"), "judge local group by target, join_id={}, index={}, g0={}, s0={}, join_layer={}", target.bottom_join->getId(), target.bottom_join_child_index, @@ -684,7 +684,7 @@ bool canAggPushDown(const LocalGroupByTarget & target, RuleContext & context) return false; LOG_DEBUG( - &Poco::Logger::get("test"), + getLogger("test"), "agg_size={}, group_by_keys_size={}, new_row_count={}, 
old_row_count={}, ratio={}", target.aggs.size(), target.keys.size(), diff --git a/src/Optimizer/Rule/Rewrite/ExtractBitmapImplicitFilter.cpp b/src/Optimizer/Rule/Rewrite/ExtractBitmapImplicitFilter.cpp index 8f32909068..24ab0ed83f 100644 --- a/src/Optimizer/Rule/Rewrite/ExtractBitmapImplicitFilter.cpp +++ b/src/Optimizer/Rule/Rewrite/ExtractBitmapImplicitFilter.cpp @@ -110,7 +110,7 @@ TransformResult ExtractBitmapImplicitFilter::transformImpl(PlanNodePtr node, con return {}; LOG_DEBUG( - &Poco::Logger::get("ExtractBitmapImplicitFilter"), + getLogger("ExtractBitmapImplicitFilter"), "Extract bitmap implicit filter for plan node {}, extracted filter: {}", node->getId(), serializeAST(*implicit_filter)); diff --git a/src/Optimizer/Rule/Rewrite/PushUnionThroughJoin.h b/src/Optimizer/Rule/Rewrite/PushUnionThroughJoin.h index b4be21a63b..3515d7653f 100644 --- a/src/Optimizer/Rule/Rewrite/PushUnionThroughJoin.h +++ b/src/Optimizer/Rule/Rewrite/PushUnionThroughJoin.h @@ -1,5 +1,6 @@ #pragma once +#include #include namespace DB @@ -36,7 +37,7 @@ public: TransformResult transformImpl(PlanNodePtr node, const Captures & captures, RuleContext & context) override; - Poco::Logger * log = &Poco::Logger::get("PushUnionThroughJoin"); + LoggerPtr log = getLogger("PushUnionThroughJoin"); }; class PushUnionThroughProjection : public Rule diff --git a/src/Optimizer/Rule/Transformation/InnerJoinAssociate.cpp b/src/Optimizer/Rule/Transformation/InnerJoinAssociate.cpp index 2af6035077..88eef4abc6 100644 --- a/src/Optimizer/Rule/Transformation/InnerJoinAssociate.cpp +++ b/src/Optimizer/Rule/Transformation/InnerJoinAssociate.cpp @@ -66,7 +66,7 @@ TransformResult InnerJoinAssociate::transformImpl(PlanNodePtr node, const Captur auto c = node->getChildren()[1]; // LOG_DEBUG( - // &Poco::Logger::get("InnerJoinAssociate"), + // getLogger("InnerJoinAssociate"), // fmt::format( // "hint {}/{}/{}", // dynamic_cast(*a->getStep()).getGroupId(), diff --git a/src/Optimizer/Signature/StepNormalizer.h 
b/src/Optimizer/Signature/StepNormalizer.h index 04bef58bbe..973bb9de40 100644 --- a/src/Optimizer/Signature/StepNormalizer.h +++ b/src/Optimizer/Signature/StepNormalizer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -71,7 +72,7 @@ protected: private: ContextPtr context; - Poco::Logger * log = &Poco::Logger::get("StepNormalizer"); + LoggerPtr log = getLogger("StepNormalizer"); }; } diff --git a/src/Parsers/DumpASTNode.h b/src/Parsers/DumpASTNode.h index 27ea3b50d6..6ee3c237f4 100644 --- a/src/Parsers/DumpASTNode.h +++ b/src/Parsers/DumpASTNode.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -96,7 +97,7 @@ public: : log(nullptr) { if constexpr (_enable) - log = &Poco::Logger::get("AST"); + log = getLogger("AST"); } ~DebugASTLog() @@ -108,7 +109,7 @@ public: WriteBuffer * stream() { return (_enable ? &buf : nullptr); } private: - Poco::Logger * log; + LoggerPtr log; WriteBufferFromOwnString buf; }; diff --git a/src/Processors/Exchange/BroadcastExchangeSink.cpp b/src/Processors/Exchange/BroadcastExchangeSink.cpp index fe6f56c241..db2e008408 100644 --- a/src/Processors/Exchange/BroadcastExchangeSink.cpp +++ b/src/Processors/Exchange/BroadcastExchangeSink.cpp @@ -32,7 +32,7 @@ BroadcastExchangeSink::BroadcastExchangeSink(Block header_, BroadcastSenderPtrs , senders(std::move(senders_)) , options(std::move(options_)) , buffer_chunk(getPort().getHeader(), options.send_threshold_in_bytes, options.send_threshold_in_row_num) - , logger(&Poco::Logger::get("BroadcastExchangeSink")) + , logger(getLogger("BroadcastExchangeSink")) { if (options.force_use_buffer) buffer_chunk.resetBuffer(); diff --git a/src/Processors/Exchange/BroadcastExchangeSink.h b/src/Processors/Exchange/BroadcastExchangeSink.h index cb57adb98c..333f303d92 100644 --- a/src/Processors/Exchange/BroadcastExchangeSink.h +++ b/src/Processors/Exchange/BroadcastExchangeSink.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -58,7 +59,7 @@ 
private: BroadcastSenderPtrs senders; ExchangeOptions options; BufferChunk buffer_chunk; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Processors/Exchange/BufferChunk.cpp b/src/Processors/Exchange/BufferChunk.cpp index bbc105dc6d..c2fac1aea0 100644 --- a/src/Processors/Exchange/BufferChunk.cpp +++ b/src/Processors/Exchange/BufferChunk.cpp @@ -29,7 +29,7 @@ BufferChunk::BufferChunk( , column_num(header_.getColumns().size()) , threshold_in_bytes(threshold_in_bytes_) , threshold_in_row_num(threshold_in_row_num_) - , logger(&Poco::Logger::get("BufferChunk")) + , logger(getLogger("BufferChunk")) { resetBuffer(); } diff --git a/src/Processors/Exchange/BufferChunk.h b/src/Processors/Exchange/BufferChunk.h index 4277a00d42..24bb077694 100644 --- a/src/Processors/Exchange/BufferChunk.h +++ b/src/Processors/Exchange/BufferChunk.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -39,7 +40,7 @@ private: UInt64 threshold_in_row_num; MutableColumns buffer_columns; ChunkInfoPtr current_chunk_info; - Poco::Logger * logger; + LoggerPtr logger; inline size_t bufferBytes() const; inline bool compareBufferChunkInfo(const ChunkInfoPtr & chunk_info) const; }; diff --git a/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.cpp b/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.cpp index dd9a6060ff..5f06634a53 100644 --- a/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.cpp +++ b/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.cpp @@ -127,7 +127,7 @@ DiskExchangeDataManagerPtr DiskExchangeDataManager::createDiskExchangeDataManage chassert(disk); if (volume->getDisks().size() != 1) LOG_INFO( - &Poco::Logger::get("DiskExchangeDataManager"), + getLogger("DiskExchangeDataManager"), "bsp mode now only supports single disk, will use default disk:{} of volume:{}", disk->getName(), volume->getName()); @@ -149,7 +149,7 @@ DiskExchangeDataManager::DiskExchangeDataManager( ServiceDiscoveryClientPtr 
service_discovery_client_, const String & psm_name_) : WithContext(context_) - , logger(&Poco::Logger::get("DiskExchangeDataManager")) + , logger(getLogger("DiskExchangeDataManager")) , start_gc_random_wait_seconds(options_.start_gc_random_wait_seconds) , disk(std::move(disk_)) , path(options_.path) diff --git a/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.h b/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.h index 54766ca1f7..d1ef4d69cc 100644 --- a/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.h +++ b/src/Processors/Exchange/DataTrans/Batch/DiskExchangeDataManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -144,7 +145,7 @@ private: ssize_t getFileSizeRecursively(const String & file_path); bool cancelWriteTasks(const std::vector & writers); - Poco::Logger * logger; + LoggerPtr logger; /// this mutex protects read_tasks, write_tasks, cleanup_tasks, alive_queries bthread::Mutex mutex; std::map read_tasks; diff --git a/src/Processors/Exchange/DataTrans/Batch/Reader/DiskExchangeDataSource.cpp b/src/Processors/Exchange/DataTrans/Batch/Reader/DiskExchangeDataSource.cpp index 5d9978a279..6844512687 100644 --- a/src/Processors/Exchange/DataTrans/Batch/Reader/DiskExchangeDataSource.cpp +++ b/src/Processors/Exchange/DataTrans/Batch/Reader/DiskExchangeDataSource.cpp @@ -21,7 +21,7 @@ Chunk DiskExchangeDataSource::generate() if (bufs.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, fmt::format("empty files to read {}", *key)); initStream(); - LOG_DEBUG(&Poco::Logger::get("DiskExchangeDataSource"), "Start to read file {}", bufs[0]->getFileName()); + LOG_DEBUG(getLogger("DiskExchangeDataSource"), "Start to read file {}", bufs[0]->getFileName()); } auto c = stream->readImpl(); if (!c) @@ -44,7 +44,7 @@ Chunk DiskExchangeDataSource::readNextFile() { idx++; initStream(); - LOG_DEBUG(&Poco::Logger::get("DiskExchangeDataSource"), "Start to read file {}", bufs[idx]->getFileName()); + 
LOG_DEBUG(getLogger("DiskExchangeDataSource"), "Start to read file {}", bufs[idx]->getFileName()); res = stream->readImpl(); } diff --git a/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.cpp b/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.cpp index 2f1167252d..606eaa88df 100644 --- a/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.cpp +++ b/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.cpp @@ -38,7 +38,7 @@ DiskPartitionWriter::DiskPartitionWriter( , mgr(mgr_) , header(std::move(header_)) , key(std::move(key_)) - , log(&Poco::Logger::get("DiskPartitionWriter")) + , log(getLogger("DiskPartitionWriter")) , data_queue(std::make_shared>(context->getSettingsRef().exchange_remote_receiver_queue_size)) , enable_disk_writer_metrics(context->getSettingsRef().log_query_exchange) { diff --git a/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.h b/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.h index 50ea739b86..2cc7135668 100644 --- a/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.h +++ b/src/Processors/Exchange/DataTrans/Batch/Writer/DiskPartitionWriter.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -69,7 +70,7 @@ private: DiskPtr disk; Block header; ExchangeDataKeyPtr key; - Poco::Logger * log; + LoggerPtr log; /// data_queue is used here to ensure thread-safety(by background write task) when multiple write/finish are called from different threads /// TODO @lianxuechao optimize for single-thread case std::shared_ptr> data_queue; diff --git a/src/Processors/Exchange/DataTrans/BoundedDataQueue.h b/src/Processors/Exchange/DataTrans/BoundedDataQueue.h index d15a637dfb..8681c980fb 100644 --- a/src/Processors/Exchange/DataTrans/BoundedDataQueue.h +++ b/src/Processors/Exchange/DataTrans/BoundedDataQueue.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -222,7 +223,7 @@ private: std::unique_lock 
lock(mutex); while (exceedLimit() && !is_closed) { - LOG_TRACE(&Poco::Logger::get("BoundedDataQueue"), fmt::format("Queue is full and waiting, current size: {}, max size: {}", queue.size(), capacity)); + LOG_TRACE(getLogger("BoundedDataQueue"), fmt::format("Queue is full and waiting, current size: {}, max size: {}", queue.size(), capacity)); full_cv.wait(lock); } if (is_closed) diff --git a/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.cpp b/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.cpp index aad7479458..2f0f50e271 100644 --- a/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.cpp +++ b/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.cpp @@ -43,7 +43,7 @@ namespace ErrorCodes } BroadcastSenderProxy::BroadcastSenderProxy(ExchangeDataKeyPtr data_key_, SenderProxyOptions options) - : data_key(std::move(data_key_)), wait_timeout_ms(options.wait_timeout_ms), logger(&Poco::Logger::get("BroadcastSenderProxy")) + : data_key(std::move(data_key_)), wait_timeout_ms(options.wait_timeout_ms), logger(getLogger("BroadcastSenderProxy")) { } diff --git a/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.h b/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.h index 77e0e634fe..603dbe7508 100644 --- a/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.h +++ b/src/Processors/Exchange/DataTrans/BroadcastSenderProxy.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -78,7 +79,7 @@ private: UInt32 wait_timeout_ms; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.cpp b/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.cpp index c5b4e3dacd..6ed8c785f6 100644 --- a/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.cpp +++ b/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.cpp @@ -21,7 +21,7 @@ namespace DB { -BroadcastSenderProxyRegistry::BroadcastSenderProxyRegistry() : 
logger(&Poco::Logger::get("BroadcastSenderProxyRegistry")) +BroadcastSenderProxyRegistry::BroadcastSenderProxyRegistry() : logger(getLogger("BroadcastSenderProxyRegistry")) { } diff --git a/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.h b/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.h index aafe0818a7..7b3fb78538 100644 --- a/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.h +++ b/src/Processors/Exchange/DataTrans/BroadcastSenderProxyRegistry.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -58,7 +59,7 @@ private: mutable bthread::Mutex mutex; using BroadcastSenderProxyEntry = std::weak_ptr; std::unordered_map proxies; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Processors/Exchange/DataTrans/Brpc/BrpcExchangeReceiverRegistryService.h b/src/Processors/Exchange/DataTrans/Brpc/BrpcExchangeReceiverRegistryService.h index 27ef9868d5..4090c45dbb 100644 --- a/src/Processors/Exchange/DataTrans/Brpc/BrpcExchangeReceiverRegistryService.h +++ b/src/Processors/Exchange/DataTrans/Brpc/BrpcExchangeReceiverRegistryService.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -86,7 +87,7 @@ public: private: ContextMutablePtr context; int max_buf_size; - Poco::Logger * log = &Poco::Logger::get("BrpcExchangeReceiverRegistryService"); + LoggerPtr log = getLogger("BrpcExchangeReceiverRegistryService"); /// stream will be accepted, but the host socket of the accpeted stream /// is not really set yet until done->Run() is called diff --git a/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastReceiver.h b/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastReceiver.h index ac99f136b7..46d8926152 100644 --- a/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastReceiver.h +++ b/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastReceiver.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -92,7 +93,7 @@ public: AsyncRegisterResult 
registerToSendersAsync(UInt32 timeout_ms); private: String name; - Poco::Logger * log = &Poco::Logger::get("BrpcRemoteBroadcastReceiver"); + LoggerPtr log = getLogger("BrpcRemoteBroadcastReceiver"); ExchangeDataKeyPtr trans_key; String registry_address; ContextPtr context; diff --git a/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastSender.h b/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastSender.h index 1cb1f76564..c95ba712b9 100644 --- a/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastSender.h +++ b/src/Processors/Exchange/DataTrans/Brpc/BrpcRemoteBroadcastSender.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -44,7 +45,7 @@ public: BroadcastSenderType getType() override { return BroadcastSenderType::Brpc; } private: - Poco::Logger * log = &Poco::Logger::get("BrpcRemoteBroadcastSender"); + LoggerPtr log = getLogger("BrpcRemoteBroadcastSender"); ExchangeDataKeyPtrs trans_keys; ContextPtr context; Block header; diff --git a/src/Processors/Exchange/DataTrans/Brpc/StreamHandler.h b/src/Processors/Exchange/DataTrans/Brpc/StreamHandler.h index 5d8d87342d..b627e4c59c 100644 --- a/src/Processors/Exchange/DataTrans/Brpc/StreamHandler.h +++ b/src/Processors/Exchange/DataTrans/Brpc/StreamHandler.h @@ -15,6 +15,7 @@ #pragma once +#include #include "BrpcRemoteBroadcastReceiver.h" #include @@ -42,7 +43,7 @@ public: void on_finished(brpc::StreamId id, int32_t finish_status_code) override; private: ContextPtr context; - Poco::Logger * log = &Poco::Logger::get("StreamHandler"); + LoggerPtr log = getLogger("StreamHandler"); BrpcRemoteBroadcastReceiverWeakPtr receiver; Block header; bool keep_order; diff --git a/src/Processors/Exchange/DataTrans/ConcurrentShardElement.h b/src/Processors/Exchange/DataTrans/ConcurrentShardElement.h index 42014024f7..34174ba82f 100644 --- a/src/Processors/Exchange/DataTrans/ConcurrentShardElement.h +++ b/src/Processors/Exchange/DataTrans/ConcurrentShardElement.h @@ -15,6 +15,7 @@ #pragma once 
+#include #include #include #include @@ -35,10 +36,10 @@ private: bthread::Mutex mutex; std::unordered_map map_data; std::unordered_map> cvs; - Poco::Logger * log; + LoggerPtr log; public: - ConcurrentShardElement() { log = &Poco::Logger::get("ConcurrentShardElement"); } + ConcurrentShardElement() { log = getLogger("ConcurrentShardElement"); } bool empty() { diff --git a/src/Processors/Exchange/DataTrans/ConcurrentShardMap.h b/src/Processors/Exchange/DataTrans/ConcurrentShardMap.h index bba6c81cb7..f2f47f460d 100644 --- a/src/Processors/Exchange/DataTrans/ConcurrentShardMap.h +++ b/src/Processors/Exchange/DataTrans/ConcurrentShardMap.h @@ -15,6 +15,7 @@ #pragma once +#include #include "ConcurrentShardElement.h" #include @@ -73,7 +74,7 @@ public: } private: - Poco::Logger * log = &Poco::Logger::get("ConcurrentShardMap"); + LoggerPtr log = getLogger("ConcurrentShardMap"); ConcurrentShardElement & getShard(const KeyType & key) { std::size_t const shard_index = hash_function(key) % shards.size(); diff --git a/src/Processors/Exchange/DataTrans/IBroadcastSender.cpp b/src/Processors/Exchange/DataTrans/IBroadcastSender.cpp index 655f5878f9..19d91b0215 100644 --- a/src/Processors/Exchange/DataTrans/IBroadcastSender.cpp +++ b/src/Processors/Exchange/DataTrans/IBroadcastSender.cpp @@ -28,7 +28,7 @@ BroadcastStatus IBroadcastSender::send(Chunk chunk) noexcept } catch (...) 
{ - tryLogCurrentException(&Poco::Logger::get("IBroadcastSender"), __PRETTY_FUNCTION__); + tryLogCurrentException(getLogger("IBroadcastSender"), __PRETTY_FUNCTION__); String exception_str = getCurrentExceptionMessage(true); BroadcastStatus current_status = finish(BroadcastStatusCode::SEND_UNKNOWN_ERROR, exception_str); return current_status; diff --git a/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.cpp b/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.cpp index d30cbf6305..b062b8bc59 100644 --- a/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.cpp +++ b/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.cpp @@ -51,7 +51,7 @@ LocalBroadcastChannel::LocalBroadcastChannel( , options(std::move(options_)) , receive_queue(std::move(queue_)) , context(std::move(context_)) - , logger(&Poco::Logger::get("LocalBroadcastChannel")) + , logger(getLogger("LocalBroadcastChannel")) { } diff --git a/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.h b/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.h index b9b1704e76..410c173b40 100644 --- a/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.h +++ b/src/Processors/Exchange/DataTrans/Local/LocalBroadcastChannel.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -96,6 +97,6 @@ private: BroadcastStatus init_status{BroadcastStatusCode::RUNNING, false, "init"}; std::atomic broadcast_status{&init_status}; ContextPtr context; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Processors/Exchange/DataTrans/MultiPathBoundedQueue.h b/src/Processors/Exchange/DataTrans/MultiPathBoundedQueue.h index fab04c42b0..357a664738 100644 --- a/src/Processors/Exchange/DataTrans/MultiPathBoundedQueue.h +++ b/src/Processors/Exchange/DataTrans/MultiPathBoundedQueue.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -35,7 +36,7 @@ public: peak.store(will_be, std::memory_order_relaxed); } void decrease(const 
MultiPathDataPacket & packet) { amount.fetch_sub(calculate(packet), std::memory_order_relaxed); } - void logPeakMemoryUsage() const { LOG_TRACE(&Poco::Logger::get("MemoryController"), "Peak memory usage: {}", ReadableSize(peak)); } + void logPeakMemoryUsage() const { LOG_TRACE(getLogger("MemoryController"), "Peak memory usage: {}", ReadableSize(peak)); } private: Int64 calculate(const MultiPathDataPacket & packet) const { diff --git a/src/Processors/Exchange/DataTrans/MultiPathReceiver.cpp b/src/Processors/Exchange/DataTrans/MultiPathReceiver.cpp index bdbb46b711..5320a0e3ca 100644 --- a/src/Processors/Exchange/DataTrans/MultiPathReceiver.cpp +++ b/src/Processors/Exchange/DataTrans/MultiPathReceiver.cpp @@ -50,7 +50,7 @@ MultiPathReceiver::MultiPathReceiver( , sub_receivers(std::move(sub_receivers_)) , header(header_) , name(std::move(name_)) - , logger(&Poco::Logger::get("MultiPathReceiver")) + , logger(getLogger("MultiPathReceiver")) , context(context_) { for (auto & sub_receiver : sub_receivers) diff --git a/src/Processors/Exchange/DataTrans/MultiPathReceiver.h b/src/Processors/Exchange/DataTrans/MultiPathReceiver.h index 4fd44cd8e6..559d614d10 100644 --- a/src/Processors/Exchange/DataTrans/MultiPathReceiver.h +++ b/src/Processors/Exchange/DataTrans/MultiPathReceiver.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -83,7 +84,7 @@ private: BroadcastReceiverPtrs sub_receivers; Block header; String name; - Poco::Logger * logger; + LoggerPtr logger; Stopwatch register_s; ContextPtr context; diff --git a/src/Processors/Exchange/DataTrans/RpcChannelPool.h b/src/Processors/Exchange/DataTrans/RpcChannelPool.h index 3f0e259d73..03b9fd9ccb 100644 --- a/src/Processors/Exchange/DataTrans/RpcChannelPool.h +++ b/src/Processors/Exchange/DataTrans/RpcChannelPool.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -118,7 +119,7 @@ private: bthread::ConditionVariable cv; std::atomic_bool exit{false}; - Poco::Logger * log = 
&Poco::Logger::get("RpcChannelPool"); + LoggerPtr log = getLogger("RpcChannelPool"); void createExpireTimer(); size_t checkAndClearExpiredPool(const std::string & client_type); diff --git a/src/Processors/Exchange/DataTrans/RpcClient.cpp b/src/Processors/Exchange/DataTrans/RpcClient.cpp index 9eddf1674c..70ded4716e 100644 --- a/src/Processors/Exchange/DataTrans/RpcClient.cpp +++ b/src/Processors/Exchange/DataTrans/RpcClient.cpp @@ -26,7 +26,7 @@ namespace DB { RpcClient::RpcClient(String host_port_, std::function report_err_, brpc::ChannelOptions * options) - : log(&Poco::Logger::get("RpcClient")) + : log(getLogger("RpcClient")) , host_port(std::move(host_port_)) , report_err(std::move(report_err_)) , brpc_channel(std::make_unique()) diff --git a/src/Processors/Exchange/DataTrans/RpcClient.h b/src/Processors/Exchange/DataTrans/RpcClient.h index ab8735bfa1..91676fca0a 100644 --- a/src/Processors/Exchange/DataTrans/RpcClient.h +++ b/src/Processors/Exchange/DataTrans/RpcClient.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -50,7 +51,7 @@ public: protected: void initChannel(brpc::Channel & channel_, const String host_port_, brpc::ChannelOptions * options = nullptr); - Poco::Logger * log; + LoggerPtr log; String host_port; std::function report_err; diff --git a/src/Processors/Exchange/DeserializeBufTransform.cpp b/src/Processors/Exchange/DeserializeBufTransform.cpp index 0c13581e8c..e3e044f374 100644 --- a/src/Processors/Exchange/DeserializeBufTransform.cpp +++ b/src/Processors/Exchange/DeserializeBufTransform.cpp @@ -34,7 +34,7 @@ DeserializeBufTransform::DeserializeBufTransform(const Block & header_, bool ena : ISimpleTransform(Block(), header_, true) , header(getOutputPort().getHeader()) , enable_block_compress(enable_block_compress_) - , logger(&Poco::Logger::get("DeserializeBufTransform")) + , logger(getLogger("DeserializeBufTransform")) { } diff --git a/src/Processors/Exchange/DeserializeBufTransform.h 
b/src/Processors/Exchange/DeserializeBufTransform.h index 8573054c03..94264cfcfa 100644 --- a/src/Processors/Exchange/DeserializeBufTransform.h +++ b/src/Processors/Exchange/DeserializeBufTransform.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -49,7 +50,7 @@ protected: private: const Block & header; bool enable_block_compress; - Poco::Logger * logger; + LoggerPtr logger; Stopwatch s; }; diff --git a/src/Processors/Exchange/ExchangeBufferedSender.cpp b/src/Processors/Exchange/ExchangeBufferedSender.cpp index 58673f5e6b..afca753d5c 100644 --- a/src/Processors/Exchange/ExchangeBufferedSender.cpp +++ b/src/Processors/Exchange/ExchangeBufferedSender.cpp @@ -34,7 +34,7 @@ ExchangeBufferedSender::ExchangeBufferedSender( , sender(sender_) , threshold_in_bytes(threshold_in_bytes_) , threshold_in_row_num(threshold_in_row_num_) - , logger(&Poco::Logger::get("ExchangeBufferedSender")) + , logger(getLogger("ExchangeBufferedSender")) { resetBuffer(); } diff --git a/src/Processors/Exchange/ExchangeBufferedSender.h b/src/Processors/Exchange/ExchangeBufferedSender.h index c0dd616484..0ee0454331 100644 --- a/src/Processors/Exchange/ExchangeBufferedSender.h +++ b/src/Processors/Exchange/ExchangeBufferedSender.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -39,7 +40,7 @@ private: UInt64 threshold_in_bytes; UInt64 threshold_in_row_num; MutableColumns partition_buffer; - Poco::Logger * logger; + LoggerPtr logger; void resetBuffer(); inline size_t bufferBytes() const; }; diff --git a/src/Processors/Exchange/ExchangeSource.cpp b/src/Processors/Exchange/ExchangeSource.cpp index 188011de30..fbaae34aed 100644 --- a/src/Processors/Exchange/ExchangeSource.cpp +++ b/src/Processors/Exchange/ExchangeSource.cpp @@ -58,7 +58,7 @@ ExchangeSource::ExchangeSource( , options(options_) , totals_source(std::move(totals_source_)) , extremes_source(std::move(extremes_source_)) - , logger(&Poco::Logger::get("ExchangeSource")) + , 
logger(getLogger("ExchangeSource")) { } @@ -74,7 +74,7 @@ ExchangeSource::ExchangeSource( , options(options_) , totals_source(std::move(totals_source_)) , extremes_source(std::move(extremes_source_)) - , logger(&Poco::Logger::get("ExchangeSource")) + , logger(getLogger("ExchangeSource")) { } diff --git a/src/Processors/Exchange/ExchangeSource.h b/src/Processors/Exchange/ExchangeSource.h index 18013acc22..2eb8bd6999 100644 --- a/src/Processors/Exchange/ExchangeSource.h +++ b/src/Processors/Exchange/ExchangeSource.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -67,7 +68,7 @@ private: ExchangeExtremesSourcePtr extremes_source; std::atomic was_query_canceled {false}; std::atomic was_receiver_finished {false}; - Poco::Logger * logger; + LoggerPtr logger; void checkBroadcastStatus(const BroadcastStatus & status) const; }; diff --git a/src/Processors/Exchange/LoadBalancedExchangeSink.cpp b/src/Processors/Exchange/LoadBalancedExchangeSink.cpp index d5b0d591b9..4c7e6cda31 100644 --- a/src/Processors/Exchange/LoadBalancedExchangeSink.cpp +++ b/src/Processors/Exchange/LoadBalancedExchangeSink.cpp @@ -41,7 +41,7 @@ LoadBalancedExchangeSink::LoadBalancedExchangeSink(Block header_, BroadcastSende , name(name_) , senders(std::move(senders_)) , partition_selector(std::make_unique(senders.size())) - , logger(&Poco::Logger::get("LoadBalancedExchangeSink")) + , logger(getLogger("LoadBalancedExchangeSink")) { } diff --git a/src/Processors/Exchange/LoadBalancedExchangeSink.h b/src/Processors/Exchange/LoadBalancedExchangeSink.h index 6ebd44a420..ca63919ce5 100644 --- a/src/Processors/Exchange/LoadBalancedExchangeSink.h +++ b/src/Processors/Exchange/LoadBalancedExchangeSink.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -64,7 +65,7 @@ private: Block header = getPort().getHeader(); BroadcastSenderPtrs senders; LoadBalanceSelectorPtr partition_selector; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git 
a/src/Processors/Exchange/MultiPartitionExchangeSink.cpp b/src/Processors/Exchange/MultiPartitionExchangeSink.cpp index 1814696dfb..2e3e9000a0 100644 --- a/src/Processors/Exchange/MultiPartitionExchangeSink.cpp +++ b/src/Processors/Exchange/MultiPartitionExchangeSink.cpp @@ -42,7 +42,7 @@ MultiPartitionExchangeSink::MultiPartitionExchangeSink( , repartition_func(std::move(repartition_func_)) , repartition_keys(std::move(repartition_keys_)) , options(options_) - , logger(&Poco::Logger::get("MultiPartitionExchangeSink")) + , logger(getLogger("MultiPartitionExchangeSink")) { bool has_null_shuffle_key = false; diff --git a/src/Processors/Exchange/MultiPartitionExchangeSink.h b/src/Processors/Exchange/MultiPartitionExchangeSink.h index 36a5b4a1d0..94f8585beb 100644 --- a/src/Processors/Exchange/MultiPartitionExchangeSink.h +++ b/src/Processors/Exchange/MultiPartitionExchangeSink.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -71,7 +72,7 @@ private: ExchangeOptions options; ExchangeBufferedSenders buffered_senders; ChunkInfoPtr current_chunk_info; - Poco::Logger * logger; + LoggerPtr logger; const DataTypePtr * repartition_result_type_ptr ; }; diff --git a/src/Processors/Exchange/RepartitionTransform.cpp b/src/Processors/Exchange/RepartitionTransform.cpp index 8ed5c77e06..0e8efa9301 100644 --- a/src/Processors/Exchange/RepartitionTransform.cpp +++ b/src/Processors/Exchange/RepartitionTransform.cpp @@ -36,7 +36,7 @@ RepartitionTransform::RepartitionTransform( , partition_num(partition_num_) , repartition_keys(std::move(repartition_keys_)) , repartition_func(std::move(repartition_func_)) - , logger(&Poco::Logger::get("RepartitionTransform")) + , logger(getLogger("RepartitionTransform")) { } diff --git a/src/Processors/Exchange/RepartitionTransform.h b/src/Processors/Exchange/RepartitionTransform.h index 6072948590..2238066d36 100644 --- a/src/Processors/Exchange/RepartitionTransform.h +++ b/src/Processors/Exchange/RepartitionTransform.h @@ 
-14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -75,7 +76,7 @@ private: size_t partition_num; ColumnNumbers repartition_keys; ExecutableFunctionPtr repartition_func; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Processors/Exchange/SinglePartitionExchangeSink.cpp b/src/Processors/Exchange/SinglePartitionExchangeSink.cpp index 530c6f2dfb..9782c2a8d3 100644 --- a/src/Processors/Exchange/SinglePartitionExchangeSink.cpp +++ b/src/Processors/Exchange/SinglePartitionExchangeSink.cpp @@ -49,7 +49,7 @@ SinglePartitionExchangeSink::SinglePartitionExchangeSink( , column_num(header.columns()) , options(options_) , buffered_sender(header, sender, options.send_threshold_in_bytes, options.send_threshold_in_row_num) - , logger(&Poco::Logger::get("SinglePartitionExchangeSink")) + , logger(getLogger("SinglePartitionExchangeSink")) { } diff --git a/src/Processors/Exchange/SinglePartitionExchangeSink.h b/src/Processors/Exchange/SinglePartitionExchangeSink.h index d09fb5fcb1..9745d06695 100644 --- a/src/Processors/Exchange/SinglePartitionExchangeSink.h +++ b/src/Processors/Exchange/SinglePartitionExchangeSink.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -65,7 +66,7 @@ private: ExchangeOptions options; ExchangeBufferedSender buffered_sender; ChunkInfoPtr current_chunk_info; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index f3543f4ce3..da6dad4f3c 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -518,7 +518,7 @@ void collectProfileMetricRequest( } void reportToCoordinator( - Poco::Logger * log, + LoggerPtr log, const AddressInfo & coordinator_address, const AddressInfo & current_address, const IProcessor * processor, @@ -546,7 +546,7 @@ void reportToCoordinator( } void reportToCoordinator( - Poco::Logger * log, + LoggerPtr log, const 
AddressInfo & coordinator_address, const AddressInfo & current_address, const Processors & processors, diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index c674760623..bead19853b 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -131,7 +132,7 @@ private: std::atomic_bool cancelled; std::atomic_bool finished; - Poco::Logger * log = &Poco::Logger::get("PipelineExecutor"); + LoggerPtr log = getLogger("PipelineExecutor"); /// Things to stop execution to expand pipeline. struct ExpandPipelineTask diff --git a/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp index 20088481bc..44523abf31 100644 --- a/src/Processors/Formats/IRowInputFormat.cpp +++ b/src/Processors/Formats/IRowInputFormat.cpp @@ -234,7 +234,7 @@ Chunk IRowInputFormat::generate() { if (num_errors && (params.allow_errors_num > 0 || params.allow_errors_ratio > 0)) { - Poco::Logger * log = &Poco::Logger::get("IRowInputFormat"); + LoggerPtr log = getLogger("IRowInputFormat"); LOG_DEBUG(log, "Skipped {} rows with errors while reading the input stream", num_errors); } diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index 11cee96278..848200b8e8 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -857,7 +857,7 @@ private: try { Poco::URI url(base_url, "/schemas/ids/" + std::to_string(id)); - LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id); + LOG_TRACE((getLogger("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id); /// One second for connect/send/receive. Just in case. 
ConnectionTimeouts timeouts({1, 0}, {1, 0}, {1, 0}); @@ -905,7 +905,7 @@ private: Poco::JSON::Parser parser; auto json_body = parser.parse(*response_body).extract(); auto schema = json_body->getValue("schema"); - LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Successfully fetched schema id = {}\n{}", id, schema); + LOG_TRACE((getLogger("AvroConfluentRowInputFormat")), "Successfully fetched schema id = {}\n{}", id, schema); return avro::compileJsonSchemaFromString(schema); } catch (const Exception &) diff --git a/src/Processors/Formats/Impl/LMNativeORCBlockInputFormat.h b/src/Processors/Formats/Impl/LMNativeORCBlockInputFormat.h index 202076983d..9577e811b2 100644 --- a/src/Processors/Formats/Impl/LMNativeORCBlockInputFormat.h +++ b/src/Processors/Formats/Impl/LMNativeORCBlockInputFormat.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include "Processors/Formats/Impl/OrcChunkReader.h" @@ -54,7 +55,7 @@ protected: ScanParams scan_params; std::vector> scanners; std::vector> init_scanners_once; - Poco::Logger * log = &Poco::Logger::get("LMNativeORCBlockInputFormat"); + LoggerPtr log = getLogger("LMNativeORCBlockInputFormat"); }; diff --git a/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp index ce25da8d0b..d6cd2162d3 100644 --- a/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp @@ -181,7 +181,7 @@ IStorage::ColumnSizeByName ORCBlockInputFormat::getColumnSizes() auto * orc_reader = file_reader->GetRawORCReader(); if (!orc_reader) { - LOG_INFO(&Poco::Logger::get("ORCBlockInputFormat"), "cannot get columns size, raw reader ptr is nullptr."); + LOG_INFO(getLogger("ORCBlockInputFormat"), "cannot get columns size, raw reader ptr is nullptr."); return {}; } return getOrcColumnsSize(*orc_reader); diff --git a/src/Processors/Formats/Impl/OrcChunkReader.h b/src/Processors/Formats/Impl/OrcChunkReader.h index e4a770d94b..f6e9f61cc4 
100644 --- a/src/Processors/Formats/Impl/OrcChunkReader.h +++ b/src/Processors/Formats/Impl/OrcChunkReader.h @@ -1,5 +1,6 @@ #pragma once #include "Columns/IColumn.h" +#include #include "Storages/MergeTree/KeyCondition.h" #include "config_formats.h" #if USE_ORC @@ -77,7 +78,7 @@ private: std::set lowcardnull_indices; Block active_header; Block lazy_header; - Poco::Logger * logger = &Poco::Logger::get("OrcScanner"); + LoggerPtr logger = getLogger("OrcScanner"); }; struct ChunkReaderParams @@ -142,7 +143,7 @@ private: std::vector active_fields; std::vector lazy_fields; std::unique_ptr stripe_info = nullptr; - Poco::Logger * logger = &Poco::Logger::get("OrcChunkReader"); + LoggerPtr logger = getLogger("OrcChunkReader"); }; } # endif diff --git a/src/Processors/Formats/Impl/OrcChunkReader.h.orig b/src/Processors/Formats/Impl/OrcChunkReader.h.orig index bb37b0d461..27de132725 100644 --- a/src/Processors/Formats/Impl/OrcChunkReader.h.orig +++ b/src/Processors/Formats/Impl/OrcChunkReader.h.orig @@ -77,7 +77,7 @@ private: std::set lowcardnull_indices; Block active_header; Block lazy_header; - Poco::Logger * logger = &Poco::Logger::get("OrcScanner"); + LoggerPtr logger = getLogger("OrcScanner"); }; struct ChunkReaderParams @@ -142,7 +142,7 @@ private: std::vector active_fields; std::vector lazy_fields; std::unique_ptr stripe_info = nullptr; - Poco::Logger * logger = &Poco::Logger::get("OrcChunkReader"); + LoggerPtr logger = getLogger("OrcChunkReader"); }; } #endif diff --git a/src/Processors/Formats/Impl/OrcChunkReader.h.rej b/src/Processors/Formats/Impl/OrcChunkReader.h.rej index 61b7a4f36e..4379c08495 100644 --- a/src/Processors/Formats/Impl/OrcChunkReader.h.rej +++ b/src/Processors/Formats/Impl/OrcChunkReader.h.rej @@ -1,7 +1,7 @@ --- src/Processors/Formats/Impl/OrcChunkReader.h (before formatting) +++ src/Processors/Formats/Impl/OrcChunkReader.h (after formatting) @@ -145,5 +145,5 @@ - Poco::Logger * logger = &Poco::Logger::get("OrcChunkReader"); + LoggerPtr logger = 
getLogger("OrcChunkReader"); }; } -#endif diff --git a/src/Processors/Formats/Impl/OrcCommon.cpp b/src/Processors/Formats/Impl/OrcCommon.cpp index 7f5f00e21c..ff7bf4cd98 100644 --- a/src/Processors/Formats/Impl/OrcCommon.cpp +++ b/src/Processors/Formats/Impl/OrcCommon.cpp @@ -512,7 +512,7 @@ static void buildORCSearchArgumentImpl( if (!nested_type->equals(*expect_nested_type)) { LOG_DEBUG( - &Poco::Logger::get(__PRETTY_FUNCTION__), + getLogger(__PRETTY_FUNCTION__), "failed to pushdown filter due to type mismatch, orc type: {}, schema type: {}", expect_nested_type->getName(), nested_type->getName()); @@ -759,7 +759,7 @@ void IOMergeBuffer::mergeSmallRanges(const std::vector & small_ranges) .raw_offset = small_ranges[from].offset, .raw_size = end - small_ranges[from].offset, .ref_count = ref_count}; sb.align(align_size, file_size); buffer_map.insert(std::make_pair(sb.raw_offset + sb.raw_size, sb)); - // LOG_INFO(&Poco::Logger::get("updateMap"), " sb: {} ", sb.toString()); + // LOG_INFO(getLogger("updateMap"), " sb: {} ", sb.toString()); }; size_t unmerge = 0; @@ -843,7 +843,7 @@ arrow::Status IOMergeBuffer::readAtFully(int64_t offset, void * out, int64_t cou if (!ret.ok()) { auto st = random_file->ReadAt(offset, count, out); - // LOG_INFO(&Poco::Logger::get("readAtFully - Direct"), "read from {} to {}", offset, offset + count); + // LOG_INFO(getLogger("readAtFully - Direct"), "read from {} to {}", offset, offset + count); if (!st.ok()) return st.status(); ProfileEvents::increment(ProfileEvents::OrcIODirectCount, 1); @@ -855,13 +855,13 @@ arrow::Status IOMergeBuffer::readAtFully(int64_t offset, void * out, int64_t cou { sb.buffer.reserve(sb.size); auto st = random_file->ReadAt(sb.offset, sb.size, sb.buffer.data()); - // LOG_INFO(&Poco::Logger::get("readAtFully - Shared"), "read from {} to {}", sb.offset, sb.offset + sb.size); + // LOG_INFO(getLogger("readAtFully - Shared"), "read from {} to {}", sb.offset, sb.offset + sb.size); if (!st.ok()) return st.status(); 
ProfileEvents::increment(ProfileEvents::OrcIOMergedCount, 1); ProfileEvents::increment(ProfileEvents::OrcIOMergedBytes, sb.size); } - // LOG_INFO(&Poco::Logger::get("readAtFully"), "read offset {}, to {} buffer offset {}, to {} ", offset, offset + count, sb.offset, sb.offset + sb.buffer.capacity()); + // LOG_INFO(getLogger("readAtFully"), "read offset {}, to {} buffer offset {}, to {} ", offset, offset + count, sb.offset, sb.offset + sb.buffer.capacity()); uint8_t * buffer = sb.buffer.data() + offset - sb.offset; std::memcpy(out, buffer, count); ProfileEvents::increment(ProfileEvents::OrcIOSharedCount, 1); @@ -1500,7 +1500,7 @@ static ColumnWithTypeAndName readColumnFromORCColumn( column_data.insert_assume_reserved(codes.data(), codes.data() + orc_column->numElements); auto lc = ColumnLowCardinality::create(std::move(dict_column), std::move(indexes_column)); - LOG_TRACE(&Poco::Logger::get(__FUNCTION__), "read lc from dict page with structure {}", lc->dumpStructure()); + LOG_TRACE(getLogger(__FUNCTION__), "read lc from dict page with structure {}", lc->dumpStructure()); return {std::move(lc), type_hint, column_name}; } else if (orc_str_batch->use_codes) @@ -1535,7 +1535,7 @@ static ColumnWithTypeAndName readColumnFromORCColumn( } } auto lc = ColumnLowCardinality::create(std::move(dict_column), std::move(indexes_column)); - LOG_TRACE(&Poco::Logger::get(__FUNCTION__), "read lc from dict page with structure {}", lc->dumpStructure()); + LOG_TRACE(getLogger(__FUNCTION__), "read lc from dict page with structure {}", lc->dumpStructure()); return {std::move(lc), type_hint, column_name}; } else @@ -1546,7 +1546,7 @@ static ColumnWithTypeAndName readColumnFromORCColumn( auto dict_column = column_lc.getDictionaryPtr()->cloneEmpty(); auto lc = ColumnLowCardinality::create(std::move(dict_column), std::move(indexes_column), std::move(full_string_col.column)); - LOG_TRACE(&Poco::Logger::get(__FUNCTION__), "read lc from direct page with structure {}", lc->dumpStructure()); + 
LOG_TRACE(getLogger(__FUNCTION__), "read lc from direct page with structure {}", lc->dumpStructure()); return {std::move(lc), type_hint, column_name}; } } @@ -1844,7 +1844,7 @@ void ORCColumnToCHColumn::orcColumnsToCHChunk( // if (auto * orc_str_batch = dynamic_cast(orc_column_with_type.first); orc_str_batch) // { // LOG_INFO( - // &Poco::Logger::get(__PRETTY_FUNCTION__), + // getLogger(__PRETTY_FUNCTION__), // "before orc batch {} {} dict {} use codes {}", // orc_column_with_type.first->numElements, // orc_column_with_type.second->toString(), @@ -1854,7 +1854,7 @@ void ORCColumnToCHColumn::orcColumnsToCHChunk( column = readColumnFromORCColumn( orc_column_with_type.first, orc_column_with_type.second, header_column.name, false, allow_out_of_range, header_column.type); // LOG_INFO( - // &Poco::Logger::get(__PRETTY_FUNCTION__), + // getLogger(__PRETTY_FUNCTION__), // "orc batch {} {}, ce {}", // orc_column_with_type.first->numElements, // orc_column_with_type.second->toString(), @@ -1897,7 +1897,7 @@ IStorage::ColumnSizeByName getOrcColumnsSize(orc::Reader & orc_reader) if (!stripe_info_ptr) { - LOG_INFO(&Poco::Logger::get("ORCBlockInputFormat"), "cannot get columns size, stripe info ptr is nullptr."); + LOG_INFO(getLogger("ORCBlockInputFormat"), "cannot get columns size, stripe info ptr is nullptr."); return {}; } const auto & stripe_info = *stripe_info_ptr; diff --git a/src/Processors/Formats/Impl/OrcCommon.h b/src/Processors/Formats/Impl/OrcCommon.h index f25ba7fd91..ac01e5e5fd 100644 --- a/src/Processors/Formats/Impl/OrcCommon.h +++ b/src/Processors/Formats/Impl/OrcCommon.h @@ -1,4 +1,5 @@ #pragma once +#include #include "Processors/Chunk.h" #include "config_formats.h" #if USE_ORC @@ -169,7 +170,7 @@ private: bool case_insensitive_matching; bool allow_out_of_range; NameSet header_columns; - Poco::Logger * logger = &Poco::Logger::get("ORCColumnToCHColumn"); + LoggerPtr logger = getLogger("ORCColumnToCHColumn"); }; IStorage::ColumnSizeByName 
getOrcColumnsSize(orc::Reader & orc_reader); diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index e7a6435981..14f322a879 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -80,7 +81,7 @@ public: { collectorThreadFunction(thread_group); }); - LOG_TRACE(&Poco::Logger::get("ParallelFormattingOutputFormat"), "Parallel formatting is being used"); + LOG_TRACE(getLogger("ParallelFormattingOutputFormat"), "Parallel formatting is being used"); } ~ParallelFormattingOutputFormat() override diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index bc68aaf7d9..42d1581f16 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -97,7 +98,7 @@ public: // bump into reader thread on wraparound. 
processing_units.resize(params.max_threads + 2); - LOG_TRACE(&Poco::Logger::get("ParallelParsingInputFormat"), "Parallel parsing is used"); + LOG_TRACE(getLogger("ParallelParsingInputFormat"), "Parallel parsing is used"); } ~ParallelParsingInputFormat() override diff --git a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp index 71d60336b6..d1bfdaeb9f 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp +++ b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp @@ -233,7 +233,7 @@ ParquetLeafColReader::ParquetLeafColReader( , base_data_type(base_type_) , col_chunk_meta(std::move(meta_)) , parquet_page_reader(std::move(reader_)) - , log(&Poco::Logger::get("ParquetLeafColReader")) + , log(getLogger("ParquetLeafColReader")) { } diff --git a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.h b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.h index 47ebba3e90..4fa149df4a 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.h +++ b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include "ParquetColumnReader.h" @@ -48,7 +49,7 @@ private: size_t num_values_remaining_in_page = 0; bool reading_low_cardinality = false; - Poco::Logger * log; + LoggerPtr log; void resetColumn(UInt64 rows_num); void resetColumn(); diff --git a/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.cpp b/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.cpp index af9e577600..e5f7ee6223 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.cpp +++ b/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.cpp @@ -222,7 +222,7 @@ ParquetRecordReader::ParquetRecordReader( , prewhere_info(std::move(prewhere_info_)) , max_block_size(format_settings.parquet.max_block_size) , row_groups_indices(std::move(row_groups_indices_)) - , log(&Poco::Logger::get("ParquetRecordReader")) + 
, log(getLogger("ParquetRecordReader")) { /// TODO: implement our own Schema Manifest std::ignore = parquet::arrow::SchemaManifest::Make( diff --git a/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.h b/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.h index 8536a75a5f..2ed35cbfa3 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.h +++ b/src/Processors/Formats/Impl/Parquet/ParquetRecordReader.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -91,7 +92,7 @@ private: size_t cur_row_group_left_rows = 0; int next_row_group_idx = 0; - Poco::Logger * log; + LoggerPtr log; bool loadNextRowGroup(); }; diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index 0f7d3a81d2..be201ac9e4 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -693,7 +693,7 @@ ParquetSchemaReader::ParquetSchemaReader(ReadBuffer & in_, const FormatSettings NamesAndTypesList ParquetSchemaReader::readSchema() { - LOG_TRACE(&Poco::Logger::get("ParquetSchemaReader"), "start readSchema"); + LOG_TRACE(getLogger("ParquetSchemaReader"), "start readSchema"); std::atomic is_stopped{0}; auto file = asArrowFile(in, format_settings, is_stopped, "Parquet", PARQUET_MAGIC_BYTES); diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index a7edeb4c66..a129bcc243 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include "config_formats.h" #if USE_PARQUET @@ -234,7 +235,7 @@ private: PrewhereInfoPtr prewhere_info; std::shared_ptr field_util; - Poco::Logger * log {&Poco::Logger::get("ParquetBlockInputFormat")}; + LoggerPtr log {getLogger("ParquetBlockInputFormat")}; }; class ParquetSchemaReader : public ISchemaReader diff 
--git a/src/Processors/Formats/ReadSchemaUtils.cpp b/src/Processors/Formats/ReadSchemaUtils.cpp index 25ee23ed4a..0898ee3281 100644 --- a/src/Processors/Formats/ReadSchemaUtils.cpp +++ b/src/Processors/Formats/ReadSchemaUtils.cpp @@ -98,7 +98,7 @@ std::pair readSchemaFromFormatImpl( const ContextPtr & context) try { - LOG_TRACE(&Poco::Logger::get("readSchemaFromFormatImpl"), " start readSchemaFromFormatImpl"); + LOG_TRACE(getLogger("readSchemaFromFormatImpl"), " start readSchemaFromFormatImpl"); NamesAndTypesList names_and_types; SchemaInferenceMode mode = context->getSettingsRef().schema_inference_mode; // if (format_name && mode == SchemaInferenceMode::UNION && !FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(*format_name, context, format_settings)) @@ -156,7 +156,7 @@ try if (iterator_data.cached_columns) { - LOG_TRACE(&Poco::Logger::get("readSchemaFromFormatImpl"), "iterator_data cached columns..."); + LOG_TRACE(getLogger("readSchemaFromFormatImpl"), "iterator_data cached columns..."); /// If we have schema in cache, we must also know the format. 
if (!format_name) @@ -241,7 +241,7 @@ try try { - LOG_TRACE(&Poco::Logger::get("readSchemaFromFormatImpl"), "format name = {}", *format_name); + LOG_TRACE(getLogger("readSchemaFromFormatImpl"), "format name = {}", *format_name); schema_reader = FormatFactory::instance().getSchemaReader(*format_name, *iterator_data.buf, context, format_settings); schema_reader->setMaxRowsAndBytesToRead(max_rows_to_read, max_bytes_to_read); diff --git a/src/Processors/IntermediateResult/CacheManager.cpp b/src/Processors/IntermediateResult/CacheManager.cpp index babf58d351..6d0e5da380 100644 --- a/src/Processors/IntermediateResult/CacheManager.cpp +++ b/src/Processors/IntermediateResult/CacheManager.cpp @@ -19,7 +19,7 @@ namespace ErrorCodes } CacheManager::CacheManager(size_t max_size_in_bytes) - : cache(std::make_shared(max_size_in_bytes)), log(&Poco::Logger::get("CacheManager")) + : cache(std::make_shared(max_size_in_bytes)), log(getLogger("CacheManager")) { } diff --git a/src/Processors/IntermediateResult/CacheManager.h b/src/Processors/IntermediateResult/CacheManager.h index fe6eb52f12..0321a77dbf 100644 --- a/src/Processors/IntermediateResult/CacheManager.h +++ b/src/Processors/IntermediateResult/CacheManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -282,7 +283,7 @@ private: // It's not a problem std::shared_ptr cache; Container uncompleted_cache; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Processors/MergeTreeSelectPrepareProcessor.cpp b/src/Processors/MergeTreeSelectPrepareProcessor.cpp index ecdb391793..c58dc3e219 100644 --- a/src/Processors/MergeTreeSelectPrepareProcessor.cpp +++ b/src/Processors/MergeTreeSelectPrepareProcessor.cpp @@ -26,7 +26,7 @@ MergeTreeSelectPrepareProcessor::MergeTreeSelectPrepareProcessor( IProcessor::Status MergeTreeSelectPrepareProcessor::prepare() { - // LOG_ERROR(&Poco::Logger::get("MergeTreeSelectPrepareProcessor"), "thread:{}", current_thread->thread_id); + // 
LOG_ERROR(getLogger("MergeTreeSelectPrepareProcessor"), "thread:{}", current_thread->thread_id); if (inputs.empty()) { if (start_expand) @@ -80,7 +80,7 @@ void MergeTreeSelectPrepareProcessor::work() if (!RuntimeFilterManager::getInstance().getDynamicValue(rf)->isReady()) { LOG_DEBUG( - &Poco::Logger::get("MergeTreeSelectPrepareProcessor"), + getLogger("MergeTreeSelectPrepareProcessor"), "wait time out:{} rf:{}", timing.elapsed(), rf); @@ -95,7 +95,7 @@ void MergeTreeSelectPrepareProcessor::work() if (all_ready) { - // LOG_DEBUG(&Poco::Logger::get("MergeTreeSelectPrepareProcessor"), "cost time:{} thread:{}", timing.elapsed(), + // LOG_DEBUG(getLogger("MergeTreeSelectPrepareProcessor"), "cost time:{} thread:{}", timing.elapsed(), // current_thread->thread_id); poll_done = true; } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 2d873aa6bc..ec999edaca 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -27,7 +27,7 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( const String & sign_column, bool only_positive_sign_, size_t max_block_size, - Poco::Logger * log_, + LoggerPtr log_, WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks(num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs) diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index 18ebaad559..88350eba5a 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -33,7 +34,7 @@ public: const String & sign_column, bool only_positive_sign_, /// For select final. Skip rows with sum(sign) < 0. 
size_t max_block_size, - Poco::Logger * log_, + LoggerPtr log_, WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); @@ -62,7 +63,7 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. - Poco::Logger * log; + LoggerPtr log; void reportIncorrectData(); void insertRow(RowRef & row); diff --git a/src/Processors/Merges/Algorithms/RowRef.h b/src/Processors/Merges/Algorithms/RowRef.h index e4610c8858..ef6dcd4e66 100644 --- a/src/Processors/Merges/Algorithms/RowRef.h +++ b/src/Processors/Merges/Algorithms/RowRef.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -83,7 +84,7 @@ public: { if (free_chunks.size() != chunks.size()) { - LOG_ERROR(&Poco::Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); + LOG_ERROR(getLogger("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); return; } @@ -100,7 +101,7 @@ private: /// This may happen if allocator was removed before chunks. /// Log message and exit, because we don't want to throw exception in destructor. - LOG_ERROR(&Poco::Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); + LOG_ERROR(getLogger("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. 
StackTrace: {}", StackTrace().toString()); return; } diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 9e6bd306ee..6eb05555af 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -27,7 +28,7 @@ public: sign_column, only_positive_sign, max_block_size, - &Poco::Logger::get("CollapsingSortedTransform"), + getLogger("CollapsingSortedTransform"), out_row_sources_buf_, use_average_block_sizes) { diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index d294e9bdd6..6515aafb66 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -65,7 +65,7 @@ void MergingSortedTransform::onFinish() const auto & merged_data = algorithm.getMergedData(); - auto * log = &Poco::Logger::get("MergingSortedTransform"); + auto log = getLogger("MergingSortedTransform"); double seconds = total_stopwatch.elapsedSeconds(); diff --git a/src/Processors/ResizeProcessor.cpp b/src/Processors/ResizeProcessor.cpp index c39186edc6..fb938fd6b5 100644 --- a/src/Processors/ResizeProcessor.cpp +++ b/src/Processors/ResizeProcessor.cpp @@ -264,7 +264,7 @@ IProcessor::Status ResizeProcessor::prepare(const PortNumbers & updated_inputs, IProcessor::Status StrictResizeProcessor::prepare(const PortNumbers & updated_inputs, const PortNumbers & updated_outputs) { - static auto * logger = &Poco::Logger::get("MultiPartitionExchangeSink"); + static auto logger = getLogger("MultiPartitionExchangeSink"); if (!initialized) { diff --git a/src/Processors/Transforms/AggregatingInOrderTransform.h b/src/Processors/Transforms/AggregatingInOrderTransform.h index 10793e885c..7db19afafe 100644 --- a/src/Processors/Transforms/AggregatingInOrderTransform.h +++ b/src/Processors/Transforms/AggregatingInOrderTransform.h @@ 
-1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -60,7 +61,7 @@ private: Chunk current_chunk; Chunk to_push_chunk; - Poco::Logger * log = &Poco::Logger::get("AggregatingInOrderTransform"); + LoggerPtr log = getLogger("AggregatingInOrderTransform"); }; diff --git a/src/Processors/Transforms/AggregatingStreamingTransform.h b/src/Processors/Transforms/AggregatingStreamingTransform.h index 166c8765fb..4fbb005d2e 100644 --- a/src/Processors/Transforms/AggregatingStreamingTransform.h +++ b/src/Processors/Transforms/AggregatingStreamingTransform.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -50,7 +51,7 @@ private: Processors processors; AggregatingTransformParamsPtr params; - Poco::Logger * log = &Poco::Logger::get("AggregatingStreamingTransform"); + LoggerPtr log = getLogger("AggregatingStreamingTransform"); ColumnRawPtrs key_columns; Aggregator::AggregateColumns aggregate_columns; diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index 139cdac112..bcdcb7c485 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include #include #include @@ -173,7 +174,7 @@ private: Processors processors; AggregatingTransformParamsPtr params; - Poco::Logger * log = &Poco::Logger::get("AggregatingTransform"); + LoggerPtr log = getLogger("AggregatingTransform"); ColumnRawPtrs key_columns; Aggregator::AggregateColumns aggregate_columns; diff --git a/src/Processors/Transforms/BufferTransform.h b/src/Processors/Transforms/BufferTransform.h index 75773d5761..ba64fd328c 100644 --- a/src/Processors/Transforms/BufferTransform.h +++ b/src/Processors/Transforms/BufferTransform.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include @@ -49,7 +50,7 @@ public: if (input.isFinished() && output.isFinished()) { LOG_DEBUG( - &Poco::Logger::get("BufferTransform"), + 
getLogger("BufferTransform"), "max_used_queue_size:{}/{}, input:[rows:{} bytes:{}], output:[rows:{} bytes:{}]", max_used_queue_size, input_chunk_count, diff --git a/src/Processors/Transforms/CreatingSetsTransform.h b/src/Processors/Transforms/CreatingSetsTransform.h index a847582a98..69e6d4113c 100644 --- a/src/Processors/Transforms/CreatingSetsTransform.h +++ b/src/Processors/Transforms/CreatingSetsTransform.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -7,8 +8,6 @@ #include #include -#include - namespace DB { @@ -52,8 +51,7 @@ private: size_t rows_to_transfer = 0; size_t bytes_to_transfer = 0; - using Logger = Poco::Logger; - Poco::Logger * log = &Poco::Logger::get("CreatingSetsTransform"); + LoggerPtr log = getLogger("CreatingSetsTransform"); bool is_initialized = false; diff --git a/src/Processors/Transforms/IntermediateResultCacheTransform.cpp b/src/Processors/Transforms/IntermediateResultCacheTransform.cpp index d4a777eab8..c8c08ae6d8 100644 --- a/src/Processors/Transforms/IntermediateResultCacheTransform.cpp +++ b/src/Processors/Transforms/IntermediateResultCacheTransform.cpp @@ -25,7 +25,7 @@ IntermediateResultCacheTransform::IntermediateResultCacheTransform( , cache_max_bytes(cache_max_bytes_) , cache_max_rows(cache_max_rows_) , all_part_in_cache(all_part_in_cache_) - , log(&Poco::Logger::get("IntermediateResultCacheTransform")) + , log(getLogger("IntermediateResultCacheTransform")) { } diff --git a/src/Processors/Transforms/IntermediateResultCacheTransform.h b/src/Processors/Transforms/IntermediateResultCacheTransform.h index d639b4b855..65b104cff9 100644 --- a/src/Processors/Transforms/IntermediateResultCacheTransform.h +++ b/src/Processors/Transforms/IntermediateResultCacheTransform.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -49,7 +50,7 @@ private: UInt64 cache_max_rows = 0; bool all_part_in_cache = false; std::unordered_map uncompleted_cache; - Poco::Logger * log; + LoggerPtr log; }; } diff --git 
a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index 6793d65c9a..9e2bfe5ab1 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes class BufferingToFileTransform : public IAccumulatingTransform { public: - BufferingToFileTransform(const Block & header, Poco::Logger * log_, std::string path_) + BufferingToFileTransform(const Block & header, LoggerPtr log_, std::string path_) : IAccumulatingTransform(header, header), log(log_) , path(std::move(path_)), file_buf_out(path), compressed_buf_out(file_buf_out) , out_stream(std::make_shared(compressed_buf_out, 0, header)) @@ -81,7 +81,7 @@ public: } private: - Poco::Logger * log; + LoggerPtr log; std::string path; WriteBufferFromFile file_buf_out; CompressedWriteBuffer compressed_buf_out; diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h index 894e1ea4e3..3f396f21ec 100644 --- a/src/Processors/Transforms/MergeSortingTransform.h +++ b/src/Processors/Transforms/MergeSortingTransform.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -46,7 +47,7 @@ private: size_t sum_rows_in_blocks = 0; size_t sum_bytes_in_blocks = 0; - Poco::Logger * log = &Poco::Logger::get("MergeSortingTransform"); + LoggerPtr log = getLogger("MergeSortingTransform"); /// If remerge doesn't save memory at least several times, mark it as useless and don't do it anymore. 
bool remerge_is_useful = true; diff --git a/src/Processors/Transforms/MergingAggregatedTransform.h b/src/Processors/Transforms/MergingAggregatedTransform.h index 73e0d8cd01..39579a918b 100644 --- a/src/Processors/Transforms/MergingAggregatedTransform.h +++ b/src/Processors/Transforms/MergingAggregatedTransform.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -21,7 +22,7 @@ protected: private: AggregatingTransformParamsPtr params; - Poco::Logger * log = &Poco::Logger::get("MergingAggregatedTransform"); + LoggerPtr log = getLogger("MergingAggregatedTransform"); size_t max_threads; AggregatedDataVariants data_variants; diff --git a/src/Processors/Transforms/TableFinishTransform.cpp b/src/Processors/Transforms/TableFinishTransform.cpp index 4e55b5bfaf..b424a22752 100644 --- a/src/Processors/Transforms/TableFinishTransform.cpp +++ b/src/Processors/Transforms/TableFinishTransform.cpp @@ -143,7 +143,7 @@ void TableFinishTransform::onFinish() /// Make sure locks are release after transaction commit if (!lock_holders.empty()) lock_holders.clear(); - LOG_DEBUG(&Poco::Logger::get("TableFinishTransform"), "Finish insert select commit in table finish."); + LOG_DEBUG(getLogger("TableFinishTransform"), "Finish insert select commit in table finish."); output.finish(); } diff --git a/src/Processors/examples/read_orc.cpp b/src/Processors/examples/read_orc.cpp index b1d7e11e0a..d72ce249e0 100644 --- a/src/Processors/examples/read_orc.cpp +++ b/src/Processors/examples/read_orc.cpp @@ -222,7 +222,7 @@ void printBlock(const State & state, Block & block, WriteBuffer & wb) void readBySelect(const String & query_str, const String & file_name, size_t limit) { initLogger("trace"); - auto * log = &Poco::Logger::get(__PRETTY_FUNCTION__); + auto log = getLogger(__PRETTY_FUNCTION__); const State & state = State::instance(); ReadBufferFromFile rb(file_name); @@ -247,7 +247,7 @@ void readBySelect(const String & query_str, const String & file_name, size_t lim Status 
status_read = scanner.readNext(block); if (!status_read.ok()) { - LOG_INFO(&Poco::Logger::get(__PRETTY_FUNCTION__), "exit via {}", status_read.ToString()); + LOG_INFO(getLogger(__PRETTY_FUNCTION__), "exit via {}", status_read.ToString()); return; } LOG_DEBUG(log, block.dumpStructure()); diff --git a/src/Protos/RPCHelpers.h b/src/Protos/RPCHelpers.h index b274572cc5..3b61eddca0 100644 --- a/src/Protos/RPCHelpers.h +++ b/src/Protos/RPCHelpers.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -189,7 +190,7 @@ namespace DB::RPCHelpers } template - void onAsyncCallDoneAssertController(Resp * response, brpc::Controller * cntl, Poco::Logger * logger, String message) + void onAsyncCallDoneAssertController(Resp * response, brpc::Controller * cntl, LoggerPtr logger, String message) { try { diff --git a/src/QueryPlan/AggregatingStep.h b/src/QueryPlan/AggregatingStep.h index 8752de853b..cbaca4ab77 100644 --- a/src/QueryPlan/AggregatingStep.h +++ b/src/QueryPlan/AggregatingStep.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -235,7 +236,7 @@ public: GroupingSetsParamsList prepareGroupingSetsParams() const; private: - Poco::Logger * log = &Poco::Logger::get("TableScanStep"); + LoggerPtr log = getLogger("TableScanStep"); Names keys; NameSet keys_not_hashed; // keys which can be output directly, same as function `any`, but no type loss. 
diff --git a/src/QueryPlan/FilterStep.cpp b/src/QueryPlan/FilterStep.cpp index dea55faf73..8bf75d7cd0 100644 --- a/src/QueryPlan/FilterStep.cpp +++ b/src/QueryPlan/FilterStep.cpp @@ -219,7 +219,7 @@ std::pair FilterStep::splitLargeInValueList(const Cons std::vector large_in_value_list; for (auto & predicate : PredicateUtils::extractConjuncts(filter)) { - LOG_DEBUG(&Poco::Logger::get("FilterStep"), " predicate : {}", predicate->formatForErrorMessage()); + LOG_DEBUG(getLogger("FilterStep"), " predicate : {}", predicate->formatForErrorMessage()); if (predicate->as() && (predicate->as().name == "in" || diff --git a/src/QueryPlan/GraphvizPrinter.cpp b/src/QueryPlan/GraphvizPrinter.cpp index 7f298354fe..23df8fc1fc 100644 --- a/src/QueryPlan/GraphvizPrinter.cpp +++ b/src/QueryPlan/GraphvizPrinter.cpp @@ -3321,7 +3321,7 @@ void GraphvizPrinter::printChunk(String transform, const Block & block, const Ch value << "\n"; } - LOG_DEBUG(&Poco::Logger::get("GraphvizPrinter"), value.str()); + LOG_DEBUG(getLogger("GraphvizPrinter"), value.str()); } void appendAST( diff --git a/src/QueryPlan/Hints/ImplementJoinOperationHints.cpp b/src/QueryPlan/Hints/ImplementJoinOperationHints.cpp index 5ea2cd8948..9105610b61 100644 --- a/src/QueryPlan/Hints/ImplementJoinOperationHints.cpp +++ b/src/QueryPlan/Hints/ImplementJoinOperationHints.cpp @@ -115,14 +115,14 @@ void JoinOperationHintsVisitor::visitJoinNode(JoinNode & node, Void & v) setStepOptions(new_join_step, DistributionType::BROADCAST, true); node.setStep(new_join_step); node.replaceChildren(PlanNodes{node.getChildren()[1], node.getChildren()[0]}); - LOG_WARNING(&Poco::Logger::get("ImplementJoinOperationHints"), "BROADCAST_JOIN({})", left_broadcast_hint->getOptions().back()); + LOG_WARNING(getLogger("ImplementJoinOperationHints"), "BROADCAST_JOIN({})", left_broadcast_hint->getOptions().back()); } else if (right_broadcast_hint) { auto broadcast_step = std::dynamic_pointer_cast(step.copy(context)); setStepOptions(broadcast_step, 
DistributionType::BROADCAST, true); node.setStep(broadcast_step); - LOG_WARNING(&Poco::Logger::get("ImplementJoinOperationHints"), "BROADCAST_JOIN({})", right_broadcast_hint->getOptions().back()); + LOG_WARNING(getLogger("ImplementJoinOperationHints"), "BROADCAST_JOIN({})", right_broadcast_hint->getOptions().back()); } else if (left_repartition_hint || right_repartition_hint) { @@ -174,9 +174,9 @@ void JoinOperationHintsVisitor::visitJoinNode(JoinNode & node, Void & v) } if (left_repartition_hint) - LOG_WARNING(&Poco::Logger::get("ImplementJoinOperationHints"), "REPARTITION_JOIN({})", left_repartition_hint->getOptions().back()); + LOG_WARNING(getLogger("ImplementJoinOperationHints"), "REPARTITION_JOIN({})", left_repartition_hint->getOptions().back()); else - LOG_WARNING(&Poco::Logger::get("ImplementJoinOperationHints"), "REPARTITION_JOIN({})", right_repartition_hint->getOptions().back()); + LOG_WARNING(getLogger("ImplementJoinOperationHints"), "REPARTITION_JOIN({})", right_repartition_hint->getOptions().back()); } } diff --git a/src/QueryPlan/Hints/ImplementJoinOrderHints.cpp b/src/QueryPlan/Hints/ImplementJoinOrderHints.cpp index 86a540fa95..2288faf8c4 100644 --- a/src/QueryPlan/Hints/ImplementJoinOrderHints.cpp +++ b/src/QueryPlan/Hints/ImplementJoinOrderHints.cpp @@ -61,7 +61,7 @@ PlanNodePtr JoinOrderHintsVisitor::getLeadingJoinOrder(PlanNodePtr join_ptr, Lea join_ptr = SimpleReorderJoinVisitor::buildJoinTree(output_symbols, join_graph, join_order, context); - LOG_WARNING(&Poco::Logger::get("ImplementJoinOrderHints"), "Leading {} is implemented.", leading_hint->getJoinOrderString()); + LOG_WARNING(getLogger("ImplementJoinOrderHints"), "Leading {} is implemented.", leading_hint->getJoinOrderString()); } return join_ptr; } @@ -119,7 +119,7 @@ PlanNodePtr JoinOrderHintsVisitor::swapJoinOrder(PlanNodePtr node, SwapOrderPtr step->getHints()); PlanNodePtr new_join_node = std::make_shared( context->nextNodeId(), std::move(join_step), 
PlanNodes{join_node->getChildren()[1], join_node->getChildren()[0]}); - LOG_WARNING(&Poco::Logger::get("ImplementJoinOrderHints"), "swap_join_order{} is implemented.", swap_hint->getJoinOrderString()); + LOG_WARNING(getLogger("ImplementJoinOrderHints"), "swap_join_order{} is implemented.", swap_hint->getJoinOrderString()); return new_join_node; } return node; diff --git a/src/QueryPlan/IntermediateResultCacheStep.cpp b/src/QueryPlan/IntermediateResultCacheStep.cpp index 2ea8e90c46..f14b1cc78a 100644 --- a/src/QueryPlan/IntermediateResultCacheStep.cpp +++ b/src/QueryPlan/IntermediateResultCacheStep.cpp @@ -14,7 +14,7 @@ IntermediateResultCacheStep::IntermediateResultCacheStep( const DataStream & input_stream_, CacheParam cache_param_, Aggregator::Params aggregator_params_) : cache_param(std::move(cache_param_)) , aggregator_params(std::move(aggregator_params_)) - , log(&Poco::Logger::get("IntermediateResultCacheStep")) + , log(getLogger("IntermediateResultCacheStep")) { input_streams.emplace_back(input_stream_); Block output; diff --git a/src/QueryPlan/IntermediateResultCacheStep.h b/src/QueryPlan/IntermediateResultCacheStep.h index 3f51a62b48..cc2512534e 100644 --- a/src/QueryPlan/IntermediateResultCacheStep.h +++ b/src/QueryPlan/IntermediateResultCacheStep.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -63,7 +64,7 @@ private: std::unordered_set ignored_runtime_filters; std::unordered_set included_runtime_filters; Block cache_order; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/QueryPlan/JoinStep.cpp b/src/QueryPlan/JoinStep.cpp index c9482a02bd..2dd9cd30ad 100644 --- a/src/QueryPlan/JoinStep.cpp +++ b/src/QueryPlan/JoinStep.cpp @@ -167,7 +167,7 @@ JoinPtr JoinStep::makeJoin( // TODO: Yuanning RuntimeFilter, compare with CE code when fix // if (enable_parallel_hash_join) // { - // LOG_TRACE(&Poco::Logger::get("JoinStep::makeJoin"), "will use parallel Hash Join"); + // LOG_TRACE(getLogger("JoinStep::makeJoin"), "will use 
parallel Hash Join"); // std::vector res; // res.reserve(num_streams); // for (size_t i = 0; i < num_streams; ++i) @@ -177,7 +177,7 @@ JoinPtr JoinStep::makeJoin( // consumer->fixParallel(num_streams); // return res; // } - LOG_TRACE(&Poco::Logger::get("JoinStep::makeJoin"), "will use ConcurrentHashJoin"); + LOG_TRACE(getLogger("JoinStep::makeJoin"), "will use ConcurrentHashJoin"); if (consumer) consumer->fixParallel(ConcurrentHashJoin::toPowerOfTwo(std::min(num_streams, 256))); return std::make_shared(table_join, num_streams, context->getSettings().parallel_join_rows_batch_threshold, r_sample_block); @@ -191,10 +191,10 @@ JoinPtr JoinStep::makeJoin( auto parallel = (context->getSettingsRef().grace_hash_join_left_side_parallel != 0 ? context->getSettingsRef().grace_hash_join_left_side_parallel: num_streams); return std::make_shared(context, table_join, l_sample_block, r_sample_block, context->getTempDataOnDisk(), parallel, context->getSettingsRef().spill_mode == SpillMode::AUTO, false, num_streams); } else if (allow_merge_join) { // fallback into merge join - LOG_WARNING(&Poco::Logger::get("JoinStep::makeJoin"), "Grace hash join is not support, fallback into merge join."); + LOG_WARNING(getLogger("JoinStep::makeJoin"), "Grace hash join is not support, fallback into merge join."); return std::make_shared(table_join, r_sample_block); } else { // fallback into hash join when grace hash and merge join not supported - LOG_WARNING(&Poco::Logger::get("JoinStep::makeJoin"), "Grace hash join and merge join is not support, fallback into hash join."); + LOG_WARNING(getLogger("JoinStep::makeJoin"), "Grace hash join and merge join is not support, fallback into hash join."); return std::make_shared(table_join, r_sample_block); } } @@ -208,10 +208,10 @@ JoinPtr JoinStep::makeJoin( auto parallel = (context->getSettingsRef().grace_hash_join_left_side_parallel != 0 ? 
context->getSettingsRef().grace_hash_join_left_side_parallel: num_streams); return std::make_shared(context, table_join, l_sample_block, r_sample_block, context->getTempDataOnDisk(), parallel, context->getSettingsRef().spill_mode == SpillMode::AUTO, false, num_streams); } else if (allow_merge_join) { // fallback into merge join - LOG_WARNING(&Poco::Logger::get("JoinStep::makeJoin"), "Grace hash join is not support, fallback into merge join."); + LOG_WARNING(getLogger("JoinStep::makeJoin"), "Grace hash join is not support, fallback into merge join."); return std::make_shared(table_join, r_sample_block); } else { // fallback into hash join when grace hash and merge join not supported - LOG_WARNING(&Poco::Logger::get("JoinStep::makeJoin"), "Grace hash join and merge join is not support, fallback into hash join."); + LOG_WARNING(getLogger("JoinStep::makeJoin"), "Grace hash join and merge join is not support, fallback into hash join."); return std::make_shared(table_join, r_sample_block); } } diff --git a/src/QueryPlan/PlanCache.cpp b/src/QueryPlan/PlanCache.cpp index f2280fb378..3df6b46755 100644 --- a/src/QueryPlan/PlanCache.cpp +++ b/src/QueryPlan/PlanCache.cpp @@ -21,7 +21,7 @@ void PlanCacheManager::initialize(ContextMutablePtr context) auto * manager_instance = context->getPlanCacheManager(); if (manager_instance->cache) { - LOG_WARNING(&Poco::Logger::get("PlanCacheManager"), "PlanCacheManager already initialized"); + LOG_WARNING(getLogger("PlanCacheManager"), "PlanCacheManager already initialized"); return; } diff --git a/src/QueryPlan/QueryPlan.h b/src/QueryPlan/QueryPlan.h index b965a8c6bd..3b17a9022a 100644 --- a/src/QueryPlan/QueryPlan.h +++ b/src/QueryPlan/QueryPlan.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -209,7 +210,7 @@ public: void prepare(const PreparedStatementContext & prepared_context); private: - Poco::Logger * log = &Poco::Logger::get("QueryPlan"); + LoggerPtr log = getLogger("QueryPlan"); // Flatten, in segment only 
Nodes nodes; CTENodes cte_nodes; // won't serialize diff --git a/src/QueryPlan/ReadFromMergeTree.cpp b/src/QueryPlan/ReadFromMergeTree.cpp index 8a1b0b2c95..5657e601fe 100644 --- a/src/QueryPlan/ReadFromMergeTree.cpp +++ b/src/QueryPlan/ReadFromMergeTree.cpp @@ -133,7 +133,7 @@ static Array extractMapColumnKeys(const MergeTreeMetaBase & data, const MergeTre map_types[it->name] = it->type; } - Poco::Logger * logger = nullptr; + LoggerPtr logger = nullptr; for (auto & part : parts) { for (auto & [file, _] : part->getChecksums()->files) @@ -149,7 +149,7 @@ static Array extractMapColumnKeys(const MergeTreeMetaBase & data, const MergeTre if (!map_types.count(map_name)) { if (unlikely(logger == nullptr)) - logger = &Poco::Logger::get(data.getLogName() + " (ExtractMapKeys)"); + logger = getLogger(data.getLogName() + " (ExtractMapKeys)"); LOG_WARNING(logger, "Can not find byte map column {} of implicit file {}", map_name, file); continue; } @@ -296,7 +296,7 @@ ReadFromMergeTree::ReadFromMergeTree( bool sample_factor_column_queried_, bool map_column_keys_column_queried_, std::shared_ptr max_block_numbers_to_read_, - Poco::Logger * log_, + LoggerPtr log_, MergeTreeDataSelectAnalysisResultPtr analyzed_result_ptr_) : ISourceStep(DataStream{ .header = query_info_.atomic_predicates.empty() @@ -383,7 +383,7 @@ Pipe ReadFromMergeTree::readFromPool( settings.preferred_block_size_bytes, false); - auto * logger = &Poco::Logger::get(data.getLogName() + " (SelectExecutor)"); + auto logger = getLogger(data.getLogName() + " (SelectExecutor)"); LOG_DEBUG(logger, "Reading approx. 
{} rows with {} streams", total_rows, max_streams); MergeTreeStreamSettings stream_settings { .min_marks_for_concurrent_read = min_marks_for_concurrent_read, @@ -615,7 +615,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams( if (info.sum_marks < num_streams * info.min_marks_for_concurrent_read && parts_with_ranges.size() < num_streams) { num_streams = std::max((info.sum_marks + info.min_marks_for_concurrent_read - 1) / info.min_marks_for_concurrent_read, parts_with_ranges.size()); - LOG_TRACE(&Poco::Logger::get("ReadFromMergeTree"), + LOG_TRACE(getLogger("ReadFromMergeTree"), "Shrink the number of streams from {} to {} since data is small.", requested_num_streams, num_streams); } } @@ -1177,7 +1177,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( const MergeTreeMetaBase & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log) + LoggerPtr log) { AnalysisResult result; const auto & settings = context->getSettingsRef(); @@ -1269,7 +1269,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( RewriteDistributedQueryVisitor(query_data).visit(copy_select); auto interpreter = std::make_shared(copy_select, mutable_context, options); interpreter->execute(); - LOG_TRACE(&Poco::Logger::get("ReadFromMergeTree::selectRangesToRead"), "Construct partition filter query {}", queryToString(copy_select)); + LOG_TRACE(getLogger("ReadFromMergeTree::selectRangesToRead"), "Construct partition filter query {}", queryToString(copy_select)); MergeTreeDataSelectExecutor::filterPartsByPartition( parts, diff --git a/src/QueryPlan/ReadFromMergeTree.h b/src/QueryPlan/ReadFromMergeTree.h index 87350bc42d..eae4fafea4 100644 --- a/src/QueryPlan/ReadFromMergeTree.h +++ b/src/QueryPlan/ReadFromMergeTree.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include //#include @@ -131,7 +132,7 @@ public: bool sample_factor_column_queried_, bool map_column_keys_column_queried_, std::shared_ptr 
max_block_numbers_to_read_, - Poco::Logger * log_, + LoggerPtr log_, MergeTreeDataSelectAnalysisResultPtr analyzed_result_ptr_ ); @@ -166,7 +167,7 @@ public: const MergeTreeMetaBase & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log); + LoggerPtr log); ContextPtr getContext() const { return context; } const SelectQueryInfo & getQueryInfo() const { return query_info; } @@ -201,7 +202,7 @@ private: std::shared_ptr max_block_numbers_to_read; - Poco::Logger * log; + LoggerPtr log; UInt64 selected_parts = 0; UInt64 selected_rows = 0; UInt64 selected_marks = 0; diff --git a/src/QueryPlan/RemoteExchangeSourceStep.cpp b/src/QueryPlan/RemoteExchangeSourceStep.cpp index e7a283bd50..452f765ea8 100644 --- a/src/QueryPlan/RemoteExchangeSourceStep.cpp +++ b/src/QueryPlan/RemoteExchangeSourceStep.cpp @@ -59,7 +59,7 @@ RemoteExchangeSourceStep::RemoteExchangeSourceStep(PlanSegmentInputs inputs_, Da : ISourceStep(DataStream{.header = inputs_[0]->getHeader()}), inputs(std::move(inputs_)), is_add_totals(is_add_totals_), is_add_extremes(is_add_extremes_) { input_streams.emplace_back(std::move(input_stream_)); - logger = &Poco::Logger::get("RemoteExchangeSourceStep"); + logger = getLogger("RemoteExchangeSourceStep"); } void RemoteExchangeSourceStep::toProto(Protos::RemoteExchangeSourceStep & proto, bool) const diff --git a/src/QueryPlan/RemoteExchangeSourceStep.h b/src/QueryPlan/RemoteExchangeSourceStep.h index acb3ccf57e..b6d0998ccf 100644 --- a/src/QueryPlan/RemoteExchangeSourceStep.h +++ b/src/QueryPlan/RemoteExchangeSourceStep.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -84,7 +85,7 @@ private: std::shared_ptr query_exchange_log); PlanSegmentInputs inputs; PlanSegment * plan_segment = nullptr; - Poco::Logger * logger; + LoggerPtr logger; size_t plan_segment_id; String query_id; String coordinator_address; diff --git a/src/QueryPlan/TableFinishStep.cpp b/src/QueryPlan/TableFinishStep.cpp index 
70688132f6..bd955330c2 100644 --- a/src/QueryPlan/TableFinishStep.cpp +++ b/src/QueryPlan/TableFinishStep.cpp @@ -26,7 +26,7 @@ TableFinishStep::TableFinishStep( , output_affected_row_count_symbol(std::move(output_affected_row_count_symbol_)) , query(query_) , insert_select_with_profiles(insert_select_with_profiles_) - , log(&Poco::Logger::get("TableFinishStep")) + , log(getLogger("TableFinishStep")) { if (insert_select_with_profiles) { diff --git a/src/QueryPlan/TableFinishStep.h b/src/QueryPlan/TableFinishStep.h index 435de56473..0fcb1d23fe 100644 --- a/src/QueryPlan/TableFinishStep.h +++ b/src/QueryPlan/TableFinishStep.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -56,6 +57,6 @@ private: String output_affected_row_count_symbol; ASTPtr query; bool insert_select_with_profiles; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/QueryPlan/TableScanStep.cpp b/src/QueryPlan/TableScanStep.cpp index eedc475549..d20b28d4a8 100644 --- a/src/QueryPlan/TableScanStep.cpp +++ b/src/QueryPlan/TableScanStep.cpp @@ -259,7 +259,7 @@ private: MergeTreeDataSelectExecutor merge_tree_reader; const SelectQueryInfo & select_query_info; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; bool has_aggregate; std::optional query_lineage; @@ -278,7 +278,7 @@ TableScanExecutor::TableScanExecutor(TableScanStep & step, const MergeTreeMetaBa , merge_tree_reader(storage) , select_query_info(step.getQueryInfo()) , context(std::move(context_)) - , log(&Poco::Logger::get("TableScanExecutor")) + , log(getLogger("TableScanExecutor")) { if (storage_metadata->projections.empty()) return; @@ -799,7 +799,7 @@ TableScanStep::TableScanStep( , pushdown_filter(std::move(filter_)) , bucket_scan(bucket_scan_) , alias(alias_) - , log(&Poco::Logger::get("TableScanStep")) + , log(getLogger("TableScanStep")) { const auto & table_expression = getTableExpression(*query_info.getSelectQuery(), 0); if (table_expression && table_expression->table_function) @@ -896,7 +896,7 @@ 
TableScanStep::TableScanStep( , pushdown_filter(std::move(filter_)) , table_output_stream(std::move(table_output_stream_)) , alias(alias_) - , log(&Poco::Logger::get("TableScanStep")) + , log(getLogger("TableScanStep")) { column_names.clear(); for (auto & item : column_alias) @@ -1225,7 +1225,7 @@ void TableScanStep::initializePipeline(QueryPipeline & pipeline, const BuildQuer bool use_optimizer_projection_selection = build_context.context->getSettingsRef().optimizer_projection_support && is_merge_tree && !use_projection_index; - LOG_INFO(&Poco::Logger::get("test"), "initTableScan, limit={}, !empty={}", query->limitLength() ? serializeAST(*query->limitLength()) : "nothing", use_projection_index || use_optimizer_projection_selection); + LOG_INFO(getLogger("test"), "initTableScan, limit={}, !empty={}", query->limitLength() ? serializeAST(*query->limitLength()) : "nothing", use_projection_index || use_optimizer_projection_selection); rewriteInForBucketTable(build_context.context); stage_watch.start(); diff --git a/src/QueryPlan/TableScanStep.h b/src/QueryPlan/TableScanStep.h index 636ad0b684..ba970ea847 100644 --- a/src/QueryPlan/TableScanStep.h +++ b/src/QueryPlan/TableScanStep.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -101,7 +102,7 @@ public: , table_output_stream(std::move(table_output_stream_)) , bucket_scan(bucket_scan_) , alias(alias_) - , log(&Poco::Logger::get("TableScanStep")) + , log(getLogger("TableScanStep")) { if (storage) storage_id.uuid = storage->getStorageUUID(); @@ -247,7 +248,7 @@ private: // Only for worker. 
bool is_null_source{false}; - Poco::Logger * log; + LoggerPtr log; // Optimises the where clauses for a bucket table by rewriting the IN clause and hence reducing the IN set size void rewriteInForBucketTable(ContextPtr context) const; diff --git a/src/QueryPlan/TableWriteStep.cpp b/src/QueryPlan/TableWriteStep.cpp index 97a34543b2..b25b894366 100644 --- a/src/QueryPlan/TableWriteStep.cpp +++ b/src/QueryPlan/TableWriteStep.cpp @@ -71,7 +71,7 @@ BlockOutputStreams TableWriteStep::createOutputStream( auto query_settings = settings.context->getSettingsRef(); if (target_table->supportsParallelInsert() && query_settings.max_insert_threads > 1) { - LOG_INFO(&Poco::Logger::get("TableWriteStep"), + LOG_INFO(getLogger("TableWriteStep"), fmt::format("createOutputStream support parallel insert, max threads:{}, max insert threads.size:{}", max_threads, query_settings.max_insert_threads)); out_streams_size = std::min(size_t(query_settings.max_insert_threads), max_threads); } @@ -168,11 +168,11 @@ void TableWriteStep::transformPipeline(QueryPipeline & pipeline, const BuildQuer [&](const Block & current_header) -> ProcessorPtr { return std::make_shared(current_header, min_insert_block_size_rows, min_insert_block_size_bytes);} ); - LOG_INFO(&Poco::Logger::get("TableWriteStep"), fmt::format("squash min insert block size rows:{}, min insert block size bytes:{}", min_insert_block_size_rows, min_insert_block_size_bytes)); + LOG_INFO(getLogger("TableWriteStep"), fmt::format("squash min insert block size rows:{}, min insert block size bytes:{}", min_insert_block_size_rows, min_insert_block_size_bytes)); } - //LOG_DEBUG(&Poco::Logger::get("TableWriteStep"), fmt::format("output header: {}", stream->getHeader().dumpStructure())); + //LOG_DEBUG(getLogger("TableWriteStep"), fmt::format("output header: {}", stream->getHeader().dumpStructure())); pipeline.resize(out_streams.size()); - LOG_INFO(&Poco::Logger::get("TableWriteStep"), fmt::format("pipeline size: {}, out streams size {}", 
pipeline.getNumStreams(), out_streams.size())); + LOG_INFO(getLogger("TableWriteStep"), fmt::format("pipeline size: {}, out streams size {}", pipeline.getNumStreams(), out_streams.size())); if (insert_select_with_profiles) { diff --git a/src/ResourceGroup/IResourceGroupManager.cpp b/src/ResourceGroup/IResourceGroupManager.cpp index 4859b39713..af715e6e29 100644 --- a/src/ResourceGroup/IResourceGroupManager.cpp +++ b/src/ResourceGroup/IResourceGroupManager.cpp @@ -145,13 +145,13 @@ ResourceSelectCase::QueryType ResourceSelectCase::getQueryType(const DB::IAST * void IResourceGroupManager::enable() { disabled.store(false, std::memory_order_relaxed); - LOG_DEBUG(&Poco::Logger::get("ResourceGroupManager"), "enabled"); + LOG_DEBUG(getLogger("ResourceGroupManager"), "enabled"); } void IResourceGroupManager::disable() { disabled.store(true, std::memory_order_relaxed); - LOG_DEBUG(&Poco::Logger::get("ResourceGroupManager"), "disabled"); + LOG_DEBUG(getLogger("ResourceGroupManager"), "disabled"); } IResourceGroupManager::Container IResourceGroupManager::getGroups() const diff --git a/src/ResourceGroup/InternalResourceGroupManager.cpp b/src/ResourceGroup/InternalResourceGroupManager.cpp index 5d9fb96db0..d5e74dfee0 100644 --- a/src/ResourceGroup/InternalResourceGroupManager.cpp +++ b/src/ResourceGroup/InternalResourceGroupManager.cpp @@ -40,10 +40,10 @@ namespace ErrorCodes void InternalResourceGroupManager::initialize(const Poco::Util::AbstractConfiguration &config) { - LOG_DEBUG(&Poco::Logger::get("ResourceGroupManager"), "Load resource group manager"); + LOG_DEBUG(getLogger("ResourceGroupManager"), "Load resource group manager"); if (!root_groups.empty()) { - LOG_WARNING(&Poco::Logger::get("ResourceGroupManager"), "need to restart to reload config."); + LOG_WARNING(getLogger("ResourceGroupManager"), "need to restart to reload config."); return; } @@ -61,7 +61,7 @@ void InternalResourceGroupManager::initialize(const Poco::Util::AbstractConfigur throw Exception("Resource 
group has no name", ErrorCodes::RESOURCE_GROUP_ILLEGAL_CONFIG); String name = config.getString(prefixWithKey + ".name"); - LOG_DEBUG(&Poco::Logger::get("ResourceGroupManager"), "Found resource group {}", name); + LOG_DEBUG(getLogger("ResourceGroupManager"), "Found resource group {}", name); if (groups.find(name) != groups.end()) throw Exception("Resource group name duplicated: " + name, ErrorCodes::RESOURCE_GROUP_ILLEGAL_CONFIG); @@ -164,7 +164,7 @@ void InternalResourceGroupManager::initialize(const Poco::Util::AbstractConfigur if (key.find("case") == 0) { ResourceSelectCase select_case; - LOG_DEBUG(&Poco::Logger::get("ResourceGroupManager"), "Found resource group case {}", key); + LOG_DEBUG(getLogger("ResourceGroupManager"), "Found resource group case {}", key); if (!config.has(prefixWithKey + ".resource_group")) throw Exception("Select case " + key + " does not config resource group", ErrorCodes::RESOURCE_GROUP_ILLEGAL_CONFIG); select_case.name = key; @@ -190,7 +190,7 @@ void InternalResourceGroupManager::initialize(const Poco::Util::AbstractConfigur select_cases[select_case.name] = std::move(select_case); } } - LOG_DEBUG(&Poco::Logger::get("ResourceGroupManager"), "Found {} resource groups, {} select cases.", + LOG_DEBUG(getLogger("ResourceGroupManager"), "Found {} resource groups, {} select cases.", groups.size(), select_cases.size()); select_algorithm = config.getString("resource_groups.select_algorithm", "user_query"); diff --git a/src/ResourceGroup/VWQueueSyncThread.cpp b/src/ResourceGroup/VWQueueSyncThread.cpp index cbf1197348..94004e03de 100644 --- a/src/ResourceGroup/VWQueueSyncThread.cpp +++ b/src/ResourceGroup/VWQueueSyncThread.cpp @@ -31,7 +31,7 @@ namespace DB VWQueueSyncThread::VWQueueSyncThread(UInt64 interval_, ContextPtr global_context_) : WithContext(global_context_) , interval(interval_) - , log(&Poco::Logger::get("VWQueueSyncThread")) + , log(getLogger("VWQueueSyncThread")) { LOG_DEBUG(log, "Starting VW Queue Sync"); task = 
getContext()->getSchedulePool().createTask("VWQueueSyncThread", [this]{ run(); }); @@ -204,7 +204,7 @@ void VWQueueSyncThread::run() auto vw_resource_group_manager = dynamic_cast(resource_group_manager); if (!vw_resource_group_manager) { - LOG_DEBUG(&Poco::Logger::get("VWQueueSyncThread"), "Stopping VW Queue Sync Thread because Resource Group Manager is not of type VWResourceGroupManager"); + LOG_DEBUG(getLogger("VWQueueSyncThread"), "Stopping VW Queue Sync Thread because Resource Group Manager is not of type VWResourceGroupManager"); return; } diff --git a/src/ResourceGroup/VWQueueSyncThread.h b/src/ResourceGroup/VWQueueSyncThread.h index 14f56b4f1d..fe5c06d61c 100644 --- a/src/ResourceGroup/VWQueueSyncThread.h +++ b/src/ResourceGroup/VWQueueSyncThread.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -71,7 +72,7 @@ private: BackgroundSchedulePool::TaskHolder task; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/ResourceGroup/VWResourceGroup.cpp b/src/ResourceGroup/VWResourceGroup.cpp index aea8367b7b..6b47dcdaed 100644 --- a/src/ResourceGroup/VWResourceGroup.cpp +++ b/src/ResourceGroup/VWResourceGroup.cpp @@ -37,7 +37,7 @@ namespace ErrorCodes VWResourceGroup::VWResourceGroup(ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get("VWResourceGroup")) {}; + , log(getLogger("VWResourceGroup")) {}; bool isVWQueueSyncOutdated(const VWResourceGroupManager & manager, UInt64 timeout) { diff --git a/src/ResourceGroup/VWResourceGroup.h b/src/ResourceGroup/VWResourceGroup.h index c5c8e20f6f..4a43186df5 100644 --- a/src/ResourceGroup/VWResourceGroup.h +++ b/src/ResourceGroup/VWResourceGroup.h @@ -15,6 +15,7 @@ #pragma once +#include #include namespace Poco { class Logger; } @@ -50,7 +51,7 @@ private: mutable std::atomic logged = false; mutable std::atomic running_limit_debug_logged = false; mutable std::atomic queued_limit_debug_logged = false; - Poco::Logger * log; + LoggerPtr log; }; } diff --git 
a/src/ResourceGroup/VWResourceGroupManager.cpp b/src/ResourceGroup/VWResourceGroupManager.cpp index e9edc84161..0b9d9541db 100644 --- a/src/ResourceGroup/VWResourceGroupManager.cpp +++ b/src/ResourceGroup/VWResourceGroupManager.cpp @@ -103,7 +103,7 @@ IResourceGroup* VWResourceGroupManager::addGroup(const String & virtual_warehous // throw Exception("Select case's query type is illegal: " + key + " -> " + queryType, ErrorCodes::RESOURCE_GROUP_ILLEGAL_CONFIG); // } select_cases[select_case.name] = std::move(select_case); - LOG_DEBUG(&Poco::Logger::get("VWResourceGroupManager"), "Added group " + virtual_warehouse + " using default values"); + LOG_DEBUG(getLogger("VWResourceGroupManager"), "Added group " + virtual_warehouse + " using default values"); return pr.first->second.get(); } @@ -112,14 +112,14 @@ bool VWResourceGroupManager::deleteGroup(const String & virtual_warehouse) const auto lock = getWriteLock(); if (ResourceManagement::isSystemVW(virtual_warehouse)) { - LOG_DEBUG(&Poco::Logger::get("VWResourceGroupManager"), "Skipping deletion of read/write/task/default VWs."); + LOG_DEBUG(getLogger("VWResourceGroupManager"), "Skipping deletion of read/write/task/default VWs."); return false; } auto root_group = root_groups.find(virtual_warehouse); if (root_group == root_groups.end()) { - LOG_DEBUG(&Poco::Logger::get("VWResourceGroupManager"), "Resource group does not exist locally"); + LOG_DEBUG(getLogger("VWResourceGroupManager"), "Resource group does not exist locally"); return false; } auto info = root_group->second->getInfo(); @@ -132,7 +132,7 @@ bool VWResourceGroupManager::deleteGroup(const String & virtual_warehouse) const if (info.queued_queries != 0 || info.running_queries != 0 || info.in_use || info.last_used >= vw_timeout_threshold) { - LOG_DEBUG(&Poco::Logger::get("VWResourceGroupManager"), "Resource group " + virtual_warehouse + " is still being used, and will not be deleted."); + LOG_DEBUG(getLogger("VWResourceGroupManager"), "Resource group " + 
virtual_warehouse + " is still being used, and will not be deleted."); return false; } @@ -158,7 +158,7 @@ bool VWResourceGroupManager::deleteGroup(const String & virtual_warehouse) const vw_select_case_map.erase(cases); } groups.erase(virtual_warehouse); - LOG_DEBUG(&Poco::Logger::get("VWResourceGroupManager"), "Deleted VWResourceGroup " + virtual_warehouse); + LOG_DEBUG(getLogger("VWResourceGroupManager"), "Deleted VWResourceGroup " + virtual_warehouse); return true; } diff --git a/src/ResourceManagement/ElectionController.h b/src/ResourceManagement/ElectionController.h index 5af1082863..d54c547df2 100644 --- a/src/ResourceManagement/ElectionController.h +++ b/src/ResourceManagement/ElectionController.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -47,7 +48,7 @@ private: // Pulls logical VW and worker group info from KV store. bool pullState(); - Poco::Logger * log = &Poco::Logger::get("ElectionController"); + LoggerPtr log = getLogger("ElectionController"); ResourceManagerController & rm_controller; std::shared_ptr elector; diff --git a/src/ResourceManagement/QueryScheduler.cpp b/src/ResourceManagement/QueryScheduler.cpp index 5ff7c4ece3..2b90595e0d 100644 --- a/src/ResourceManagement/QueryScheduler.cpp +++ b/src/ResourceManagement/QueryScheduler.cpp @@ -58,7 +58,7 @@ static inline bool cmp_worker_disk(const WorkerNodePtr & a, const WorkerNodePtr QueryScheduler::QueryScheduler(VirtualWarehouse & vw_) : vw(vw_) { - log = &Poco::Logger::get(vw.getName() + " (QueryScheduler)"); + log = getLogger(vw.getName() + " (QueryScheduler)"); } /// pickWorkerGroups stage 1: filter groups by requirement. 
diff --git a/src/ResourceManagement/QueryScheduler.h b/src/ResourceManagement/QueryScheduler.h index cf9525fc79..c1ab22f360 100644 --- a/src/ResourceManagement/QueryScheduler.h +++ b/src/ResourceManagement/QueryScheduler.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include @@ -59,7 +60,7 @@ private: VirtualWarehouse & vw; - Poco::Logger * log; + LoggerPtr log; /// TODO: (zuochuang.zema) With ResourceRequirement, RoundRobin might not be a good strategy anymore as the available workers (groups) changes dynamically. std::atomic pick_group_sequence = 0; /// round-robin index for pickWorkerGroup. diff --git a/src/ResourceManagement/ResourceManager.cpp b/src/ResourceManagement/ResourceManager.cpp index 6269a65b5f..acd23e55a8 100644 --- a/src/ResourceManagement/ResourceManager.cpp +++ b/src/ResourceManagement/ResourceManager.cpp @@ -105,7 +105,7 @@ int ResourceManager::main(const std::vector &) if (consul_http_host != nullptr && consul_http_port != nullptr) brpc::policy::FLAGS_consul_agent_addr = "http://" + createHostPortString(consul_http_host, consul_http_port); - Poco::Logger * log = &logger(); + auto log = getLogger(logger()); LOG_INFO(log, "Resource Manager is starting up..."); auto shared_context = Context::createShared(); diff --git a/src/ResourceManagement/ResourceManager.h b/src/ResourceManagement/ResourceManager.h index 0f0f3e0d21..6c65ac6608 100644 --- a/src/ResourceManagement/ResourceManager.h +++ b/src/ResourceManagement/ResourceManager.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include diff --git a/src/ResourceManagement/ResourceManagerClient.cpp b/src/ResourceManagement/ResourceManagerClient.cpp index 9d53490ca8..22d83fc1db 100644 --- a/src/ResourceManagement/ResourceManagerClient.cpp +++ b/src/ResourceManagement/ResourceManagerClient.cpp @@ -270,7 +270,7 @@ void ResourceManagerClient::getWorkerGroups( request.set_last_settings_timestamp(last_settings_timestamp.load()); stub_->getWorkerGroups(&cntl, &request, &response, 
nullptr); LOG_TRACE( - &Poco::Logger::get("adaptiveScheduler"), + getLogger("adaptiveScheduler"), "getWorkerGroups response: {}", response.ShortDebugString().c_str()); assertController(cntl); RPCHelpers::checkResponse(response); @@ -421,7 +421,7 @@ WorkerGroupData ResourceManagerClient::pickWorkerGroup(const String & vw_name, V stub_->pickWorkerGroup(&cntl, &request, &response, nullptr); LOG_TRACE( - &Poco::Logger::get("adaptiveScheduler"), + getLogger("adaptiveScheduler"), "pickWorkerGroup response: {}", response.ShortDebugString().c_str()); assertController(cntl); RPCHelpers::checkResponse(response); diff --git a/src/ResourceManagement/ResourceManagerController.cpp b/src/ResourceManagement/ResourceManagerController.cpp index 7b156f8b77..97a6fa8858 100644 --- a/src/ResourceManagement/ResourceManagerController.cpp +++ b/src/ResourceManagement/ResourceManagerController.cpp @@ -42,7 +42,7 @@ namespace DB::ErrorCodes namespace DB::ResourceManagement { ResourceManagerController::ResourceManagerController(ContextPtr global_context_) - : WithContext(global_context_), log(&Poco::Logger::get("ResourceManagerController")) + : WithContext(global_context_), log(getLogger("ResourceManagerController")) { resource_tracker = std::make_unique(*this); vw_manager = std::make_unique(*this); diff --git a/src/ResourceManagement/ResourceManagerController.h b/src/ResourceManagement/ResourceManagerController.h index 626d85faca..c765b374cd 100644 --- a/src/ResourceManagement/ResourceManagerController.h +++ b/src/ResourceManagement/ResourceManagerController.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -86,7 +87,7 @@ public: private: - Poco::Logger * log{nullptr}; + LoggerPtr log{nullptr}; std::unique_ptr resource_tracker; std::unique_ptr vw_manager; diff --git a/src/ResourceManagement/ResourceManagerServiceImpl.h b/src/ResourceManagement/ResourceManagerServiceImpl.h index a250438a0f..dbb9ad4dde 100644 --- a/src/ResourceManagement/ResourceManagerServiceImpl.h +++ 
b/src/ResourceManagement/ResourceManagerServiceImpl.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include @@ -139,7 +140,7 @@ public: ::google::protobuf::Closure * done) override; private: - Poco::Logger * log = &Poco::Logger::get("ResourceManagerServiceImpl"); + LoggerPtr log = getLogger("ResourceManagerServiceImpl"); ResourceManagerController & rm_controller; VirtualWarehouseManager & vw_manager; WorkerGroupManager & group_manager; diff --git a/src/ResourceManagement/ResourceReporter.cpp b/src/ResourceManagement/ResourceReporter.cpp index 7edb69ddec..dd2d6e3cb8 100644 --- a/src/ResourceManagement/ResourceReporter.cpp +++ b/src/ResourceManagement/ResourceReporter.cpp @@ -32,7 +32,7 @@ namespace DB::ResourceManagement ResourceReporterTask::ResourceReporterTask(ContextPtr global_context_) : WithContext(global_context_) - , log(&Poco::Logger::get("ResourceReporterTask")) + , log(getLogger("ResourceReporterTask")) , resource_monitor(std::make_unique(global_context_)) , background_task(global_context_->getSchedulePool().createTask("ResourceReporterTask", [&](){ run(); })) { diff --git a/src/ResourceManagement/ResourceReporter.h b/src/ResourceManagement/ResourceReporter.h index ef7f2180bf..3e91d38524 100644 --- a/src/ResourceManagement/ResourceReporter.h +++ b/src/ResourceManagement/ResourceReporter.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -46,7 +47,7 @@ private: private: std::atomic_bool init_request = true; - Poco::Logger * log; + LoggerPtr log; std::unique_ptr resource_monitor; BackgroundSchedulePool::TaskHolder background_task; }; diff --git a/src/ResourceManagement/ResourceTracker.cpp b/src/ResourceManagement/ResourceTracker.cpp index a057b9a187..a02114652a 100644 --- a/src/ResourceManagement/ResourceTracker.cpp +++ b/src/ResourceManagement/ResourceTracker.cpp @@ -34,7 +34,7 @@ namespace DB::ResourceManagement ResourceTracker::ResourceTracker(ResourceManagerController & rm_controller_) : rm_controller(rm_controller_) - , 
log(&Poco::Logger::get("ResourceTracker")) + , log(getLogger("ResourceTracker")) , background_task(getContext()->getSchedulePool().createTask("ResourceTrackerTask", [&](){ clearLostWorkers(); })) , register_granularity_sec(getContext()->getRootConfig().resource_manager.worker_register_visible_granularity_sec.value) { diff --git a/src/ResourceManagement/ResourceTracker.h b/src/ResourceManagement/ResourceTracker.h index b8e95a9ec2..7251c2705e 100644 --- a/src/ResourceManagement/ResourceTracker.h +++ b/src/ResourceManagement/ResourceTracker.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -55,7 +56,7 @@ private: void clearLostWorkers(); ResourceManagerController & rm_controller; - Poco::Logger * log; + LoggerPtr log; /// Use bthread::Mutex but not std::mutex to avoid deadlock issue as this lock may lock other rpc API (catalog) in the lock scope. bthread::Mutex node_mutex; std::unordered_map worker_nodes; diff --git a/src/ResourceManagement/VirtualWarehouse.cpp b/src/ResourceManagement/VirtualWarehouse.cpp index 4284d413a3..632a5d4008 100644 --- a/src/ResourceManagement/VirtualWarehouse.cpp +++ b/src/ResourceManagement/VirtualWarehouse.cpp @@ -89,7 +89,7 @@ void VirtualWarehouse::applySettings(const VirtualWarehouseAlterSettings & setti if (setting_changes.cooldown_seconds_after_scaledown) new_settings.cooldown_seconds_after_scaledown = *setting_changes.cooldown_seconds_after_scaledown; - LOG_TRACE(&Poco::Logger::get("VirtualWarehouse"), "update settings alter type {}", setting_changes.queue_alter_type); + LOG_TRACE(getLogger("VirtualWarehouse"), "update settings alter type {}", setting_changes.queue_alter_type); if (setting_changes.queue_alter_type == Protos::QueueAlterType::ADD_RULE) { if (!setting_changes.queue_data) @@ -489,7 +489,7 @@ QueryQueueInfo VirtualWarehouse::getAggQueueInfo() if (it->second.last_sync < timeout_threshold) { LOG_DEBUG( - &Poco::Logger::get("VirtualWarehouse"), + getLogger("VirtualWarehouse"), "Removing outdated 
server sync from {}, last synced {}", it->first, std::to_string(it->second.last_sync)); diff --git a/src/ResourceManagement/VirtualWarehouseManager.cpp b/src/ResourceManagement/VirtualWarehouseManager.cpp index 6b1f9a3f67..36d1c0d8c0 100644 --- a/src/ResourceManagement/VirtualWarehouseManager.cpp +++ b/src/ResourceManagement/VirtualWarehouseManager.cpp @@ -36,7 +36,7 @@ namespace ErrorCodes namespace DB::ResourceManagement { VirtualWarehouseManager::VirtualWarehouseManager(ResourceManagerController & rm_controller_) - : rm_controller(rm_controller_), log(&Poco::Logger::get("VirtualWarehouseManager")) + : rm_controller(rm_controller_), log(getLogger("VirtualWarehouseManager")) { } diff --git a/src/ResourceManagement/VirtualWarehouseManager.h b/src/ResourceManagement/VirtualWarehouseManager.h index 4079ee98ba..09b3ee5083 100644 --- a/src/ResourceManagement/VirtualWarehouseManager.h +++ b/src/ResourceManagement/VirtualWarehouseManager.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -48,7 +49,7 @@ public: private: ResourceManagerController & rm_controller; - Poco::Logger * log{nullptr}; + LoggerPtr log{nullptr}; /// Use bthread::Mutex but not std::mutex to avoid deadlock issue as we call other rpc API (catalog) in the lock scope. 
mutable bthread::Mutex vw_mgr_mutex; diff --git a/src/ResourceManagement/WorkerGroupManager.cpp b/src/ResourceManagement/WorkerGroupManager.cpp index 7c624b2970..0611c4005e 100644 --- a/src/ResourceManagement/WorkerGroupManager.cpp +++ b/src/ResourceManagement/WorkerGroupManager.cpp @@ -36,7 +36,7 @@ namespace ErrorCodes namespace DB::ResourceManagement { WorkerGroupManager::WorkerGroupManager(ResourceManagerController & rm_controller_) - : rm_controller(rm_controller_), log(&Poco::Logger::get("WorkerGroupManager")) + : rm_controller(rm_controller_), log(getLogger("WorkerGroupManager")) { } diff --git a/src/ResourceManagement/WorkerGroupManager.h b/src/ResourceManagement/WorkerGroupManager.h index e598e38c69..2cfb7dd997 100644 --- a/src/ResourceManagement/WorkerGroupManager.h +++ b/src/ResourceManagement/WorkerGroupManager.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -44,7 +45,7 @@ private: WorkerGroupPtr createWorkerGroupObject(const WorkerGroupData & data, std::lock_guard * lock = nullptr); ResourceManagerController & rm_controller; - Poco::Logger * log{nullptr}; + LoggerPtr log{nullptr}; std::atomic_bool need_sync_with_catalog{false}; /// Use bthread::Mutex but not std::mutex to avoid deadlock issue as we call other rpc API (catalog) in the lock scope. 
mutable bthread::Mutex wg_mgr_mutex; diff --git a/src/ResourceManagement/WorkerGroupResourceCoordinator.cpp b/src/ResourceManagement/WorkerGroupResourceCoordinator.cpp index 576d5c4647..ad98c4460f 100644 --- a/src/ResourceManagement/WorkerGroupResourceCoordinator.cpp +++ b/src/ResourceManagement/WorkerGroupResourceCoordinator.cpp @@ -52,7 +52,7 @@ String toString(CoordinateMode mode) WorkerGroupResourceCoordinator::WorkerGroupResourceCoordinator(ResourceManagerController & rm_controller_) : rm_controller(rm_controller_) - , log(&Poco::Logger::get("ResourceCoordinator")) + , log(getLogger("ResourceCoordinator")) , background_task(rm_controller.getContext()->getSchedulePool().createTask("ResourceCoordinator", [&]() { run(); })) , task_interval_ms(rm_controller.getContext()->getRootConfig().resource_manager.resource_coordinate_task_interval_ms) { diff --git a/src/ResourceManagement/WorkerGroupResourceCoordinator.h b/src/ResourceManagement/WorkerGroupResourceCoordinator.h index 24cca61117..af8e263a3f 100644 --- a/src/ResourceManagement/WorkerGroupResourceCoordinator.h +++ b/src/ResourceManagement/WorkerGroupResourceCoordinator.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -86,7 +87,7 @@ public: private: ResourceManagerController & rm_controller; - Poco::Logger * log; + LoggerPtr log; CoordinateMode mode{CoordinateMode::Sharing}; BackgroundSchedulePool::TaskHolder background_task; UInt64 task_interval_ms; diff --git a/src/Server/APIRequestHandler.cpp b/src/Server/APIRequestHandler.cpp index 3d1f1491e8..5cdfe362a2 100644 --- a/src/Server/APIRequestHandler.cpp +++ b/src/Server/APIRequestHandler.cpp @@ -96,7 +96,7 @@ namespace ErrorCodes } APIRequestHandler::APIRequestHandler(IServer & server) - : WithMutableContext(server.context()), log(&Poco::Logger::get("HTTPHandler for API")) + : WithMutableContext(server.context()), log(getLogger("HTTPHandler for API")) { server_display_name = getContext()->getConfigRef().getString("display_name", 
getFQDNOrHostName()); } diff --git a/src/Server/APIRequestHandler.h b/src/Server/APIRequestHandler.h index c43781f4d4..7852f723db 100644 --- a/src/Server/APIRequestHandler.h +++ b/src/Server/APIRequestHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -52,7 +53,7 @@ private: void onResourceReportAction(HTTPServerRequest &, HTMLForm & params, HTTPServerResponse &); - Poco::Logger * log; + LoggerPtr log; std::string server_display_name; }; diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index c28a36db2d..0cd878c907 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -86,7 +86,7 @@ namespace static std::once_flag once_flag; std::call_once(once_flag, [&config] { - static Poco::Logger * logger = &Poco::Logger::get("grpc"); + static LoggerPtr logger = getLogger("grpc"); gpr_set_log_function([](gpr_log_func_args* args) { if (args->severity == GPR_LOG_SEVERITY_DEBUG) @@ -540,7 +540,7 @@ namespace class Call { public: - Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, Poco::Logger * log_); + Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, LoggerPtr log_); ~Call(); void start(const std::function & on_finish_call_callback); @@ -580,7 +580,7 @@ namespace const CallType call_type; std::unique_ptr responder; IServer & iserver; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; std::shared_ptr session; ContextMutablePtr query_context; @@ -632,7 +632,7 @@ namespace ThreadFromGlobalPool call_thread; }; - Call::Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, Poco::Logger * log_) + Call::Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, LoggerPtr log_) : call_type(call_type_), responder(std::move(responder_)), iserver(iserver_), log(log_) { } @@ -1697,7 +1697,7 @@ private: GRPCServer::GRPCServer(IServer & iserver_, const Poco::Net::SocketAddress & address_to_listen_) : iserver(iserver_) , 
address_to_listen(address_to_listen_) - , log(&Poco::Logger::get("GRPCServer")) + , log(getLogger("GRPCServer")) , runner(std::make_unique(*this)) {} diff --git a/src/Server/GRPCServer.h b/src/Server/GRPCServer.h index ef86b902b5..c38099c7e3 100644 --- a/src/Server/GRPCServer.h +++ b/src/Server/GRPCServer.h @@ -1,6 +1,7 @@ #pragma once #if !defined(ARCADIA_BUILD) +#include #include #endif @@ -44,7 +45,7 @@ private: IServer & iserver; const Poco::Net::SocketAddress address_to_listen; - Poco::Logger * log; + LoggerPtr log; GRPCService grpc_service; std::unique_ptr grpc_server; std::unique_ptr queue; diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 6c88fa3ce4..54e4d91872 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -285,7 +285,7 @@ void HTTPHandler::pushDelayedResults(Output & used_output) HTTPHandler::HTTPHandler(IServer & server_, const std::string & name) : server(server_) - , log(&Poco::Logger::get(name)) + , log(getLogger(name)) { server_display_name = server.config().getString("display_name", getFQDNOrHostName()); } diff --git a/src/Server/HTTPHandler.h b/src/Server/HTTPHandler.h index 2149a7ca55..6da8141786 100644 --- a/src/Server/HTTPHandler.h +++ b/src/Server/HTTPHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -64,7 +65,7 @@ private: }; IServer & server; - Poco::Logger * log; + LoggerPtr log; /// It is the name of the server that will be sent in an http-header X-ClickHouse-Server-Display-Name. 
String server_display_name; diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 6dc1525879..d907efb5cb 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -35,7 +35,7 @@ static void addDefaultHandlersFactory( HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics, ContextMutablePtr context); HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string & name_) - : log(&Poco::Logger::get(name_)), name(name_) + : log(getLogger(name_)), name(name_) { } diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h index fb4315f571..3514cd5832 100644 --- a/src/Server/HTTPHandlerFactory.h +++ b/src/Server/HTTPHandlerFactory.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -31,7 +32,7 @@ public: std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; private: - Poco::Logger * log; + LoggerPtr log; std::string name; std::vector child_factories; diff --git a/src/Server/IPrometheusMetricsWriter.cpp b/src/Server/IPrometheusMetricsWriter.cpp index dc38a38277..314532bdf7 100644 --- a/src/Server/IPrometheusMetricsWriter.cpp +++ b/src/Server/IPrometheusMetricsWriter.cpp @@ -25,7 +25,7 @@ void IPrometheusMetricsWriter::writeConfigMetrics(WriteBuffer & wb) } else { - LOG_WARNING(&Poco::Logger::get("IPrometheusMetricsWriter"), "Unknown config metric found, this should never happen"); + LOG_WARNING(getLogger("IPrometheusMetricsWriter"), "Unknown config metric found, this should never happen"); writeOutLine(wb, key_label, 0); } } diff --git a/src/Server/IServer.h b/src/Server/IServer.h index c55b045d2a..5695ca2770 100644 --- a/src/Server/IServer.h +++ b/src/Server/IServer.h @@ -1,5 +1,6 @@ #pragma once +#include #include namespace Poco diff --git a/src/Server/InterserverIOHTTPHandler.h b/src/Server/InterserverIOHTTPHandler.h index da5b286b9e..0ba4dc2479 100644 --- 
a/src/Server/InterserverIOHTTPHandler.h +++ b/src/Server/InterserverIOHTTPHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -26,7 +27,7 @@ class InterserverIOHTTPHandler : public HTTPRequestHandler public: explicit InterserverIOHTTPHandler(IServer & server_) : server(server_) - , log(&Poco::Logger::get("InterserverIOHTTPHandler")) + , log(getLogger("InterserverIOHTTPHandler")) { } @@ -39,7 +40,7 @@ private: }; IServer & server; - Poco::Logger * log; + LoggerPtr log; CurrentMetrics::Increment metric_increment{CurrentMetrics::InterserverConnection}; diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp index ea849ce274..7951f85b53 100644 --- a/src/Server/KeeperTCPHandler.cpp +++ b/src/Server/KeeperTCPHandler.cpp @@ -226,7 +226,7 @@ struct SocketInterruptablePollWrapper KeeperTCPHandler::KeeperTCPHandler(IServer & server_, const Poco::Net::StreamSocket & socket_) : Poco::Net::TCPServerConnection(socket_) , server(server_) - , log(&Poco::Logger::get("KeeperTCPHandler")) + , log(getLogger("KeeperTCPHandler")) , global_context(Context::createCopy(server.context())) , keeper_dispatcher(global_context->getKeeperDispatcher()) , operation_timeout(0, global_context->getConfigRef().getUInt("keeper_server.operation_timeout_ms", Coordination::DEFAULT_OPERATION_TIMEOUT_MS) * 1000) diff --git a/src/Server/KeeperTCPHandler.h b/src/Server/KeeperTCPHandler.h index 0c3ebecaf5..56e041280a 100644 --- a/src/Server/KeeperTCPHandler.h +++ b/src/Server/KeeperTCPHandler.h @@ -28,6 +28,7 @@ #if USE_NURAFT +#include #include #include #include "IServer.h" @@ -82,7 +83,7 @@ public: private: IServer & server; - Poco::Logger * log; + LoggerPtr log; ContextPtr global_context; std::shared_ptr keeper_dispatcher; Poco::Timespan operation_timeout; diff --git a/src/Server/KeeperTCPHandlerFactory.h b/src/Server/KeeperTCPHandlerFactory.h index af7000b368..40f82dbe2e 100644 --- a/src/Server/KeeperTCPHandlerFactory.h +++ 
b/src/Server/KeeperTCPHandlerFactory.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -35,7 +36,7 @@ class KeeperTCPHandlerFactory : public Poco::Net::TCPServerConnectionFactory { private: IServer & server; - Poco::Logger * log; + LoggerPtr log; class DummyTCPHandler : public Poco::Net::TCPServerConnection { public: @@ -46,7 +47,7 @@ private: public: KeeperTCPHandlerFactory(IServer & server_, bool secure) : server(server_) - , log(&Poco::Logger::get(std::string{"KeeperTCP"} + (secure ? "S" : "") + "HandlerFactory")) + , log(getLogger(std::string{"KeeperTCP"} + (secure ? "S" : "") + "HandlerFactory")) { } diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index 71125720a5..e23b0e7052 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -165,7 +165,7 @@ MySQLHandler::MySQLHandler(IServer & server_, TCPServer & tcp_server_, const Poc : Poco::Net::TCPServerConnection(socket_) , server(server_) , tcp_server(tcp_server_) - , log(&Poco::Logger::get("MySQLHandler")) + , log(getRawLogger("MySQLHandler")) , connection_id(connection_id_) , connection_context(Context::createCopy(server.context())) , auth_plugin(new MySQLProtocol::Authentication::Native41()) @@ -699,7 +699,7 @@ MySQLHandlerSSL::MySQLHandlerSSL(IServer & server_, TCPServer & tcp_server_, con void MySQLHandlerSSL::authPluginSSL() { - auth_plugin = std::make_unique(public_key, private_key, log); + auth_plugin = std::make_unique(public_key, private_key, log->name()); } void MySQLHandlerSSL::finishHandshakeSSL( diff --git a/src/Server/MySQLHandler.h b/src/Server/MySQLHandler.h index 41f2ca9850..6ac998f00b 100644 --- a/src/Server/MySQLHandler.h +++ b/src/Server/MySQLHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -70,7 +71,8 @@ protected: IServer & server; TCPServer & tcp_server; - Poco::Logger * log; + /// stick to raw logger to support log(const Exception& exc) + LoggerRawPtr log; uint32_t connection_id = 0; uint32_t 
server_capabilities = 0; diff --git a/src/Server/MySQLHandlerFactory.cpp b/src/Server/MySQLHandlerFactory.cpp index f5b405111f..53b8ee14ac 100644 --- a/src/Server/MySQLHandlerFactory.cpp +++ b/src/Server/MySQLHandlerFactory.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_) : server(server_) - , log(&Poco::Logger::get("MySQLHandlerFactory")) + , log(getLogger("MySQLHandlerFactory")) { #if USE_SSL try diff --git a/src/Server/MySQLHandlerFactory.h b/src/Server/MySQLHandlerFactory.h index 8330a1ccc5..a0b812233d 100644 --- a/src/Server/MySQLHandlerFactory.h +++ b/src/Server/MySQLHandlerFactory.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -20,7 +21,7 @@ class MySQLHandlerFactory : public TCPServerConnectionFactory { private: IServer & server; - Poco::Logger * log; + LoggerPtr log; #if USE_SSL struct RSADeleter diff --git a/src/Server/PostgreSQLHandler.h b/src/Server/PostgreSQLHandler.h index 9aaad1d7aa..64e60c734d 100644 --- a/src/Server/PostgreSQLHandler.h +++ b/src/Server/PostgreSQLHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -34,7 +35,8 @@ public: void run() final; private: - Poco::Logger * log = &Poco::Logger::get("PostgreSQLHandler"); + /// stick to raw logger to support log(const Exception& exc) + LoggerRawPtr log = getRawLogger("PostgreSQLHandler"); IServer & server; ContextMutablePtr connection_context; diff --git a/src/Server/PostgreSQLHandlerFactory.cpp b/src/Server/PostgreSQLHandlerFactory.cpp index 1158cf5835..01acbbda75 100644 --- a/src/Server/PostgreSQLHandlerFactory.cpp +++ b/src/Server/PostgreSQLHandlerFactory.cpp @@ -8,7 +8,7 @@ namespace DB PostgreSQLHandlerFactory::PostgreSQLHandlerFactory(IServer & server_) : server(server_) - , log(&Poco::Logger::get("PostgreSQLHandlerFactory")) + , log(getLogger("PostgreSQLHandlerFactory")) { auth_methods = { diff --git a/src/Server/PostgreSQLHandlerFactory.h b/src/Server/PostgreSQLHandlerFactory.h index 
4550e9ee8e..31535f1985 100644 --- a/src/Server/PostgreSQLHandlerFactory.h +++ b/src/Server/PostgreSQLHandlerFactory.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -13,7 +14,7 @@ class PostgreSQLHandlerFactory : public Poco::Net::TCPServerConnectionFactory { private: IServer & server; - Poco::Logger * log; + LoggerPtr log; #if USE_SSL bool ssl_enabled = true; diff --git a/src/Server/ProfilerRequestHandler.cpp b/src/Server/ProfilerRequestHandler.cpp index af468f0cf3..3ae452806c 100644 --- a/src/Server/ProfilerRequestHandler.cpp +++ b/src/Server/ProfilerRequestHandler.cpp @@ -62,7 +62,7 @@ void ProfilerRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServ { const auto & uri = request.getURI(); - LOG_INFO(&Poco::Logger::get("ProfilerHttp"), "fetching {}", uri); + LOG_INFO(getLogger("ProfilerHttp"), "fetching {}", uri); std::string resource_name, content_type; extractResourceAndType(uri, resource_name, content_type); @@ -80,7 +80,7 @@ void ProfilerRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServ response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_OK); *response.send() << content; - LOG_INFO(&Poco::Logger::get("ProfilerHttp"), "return {}, type {}", resource_name, content_type); + LOG_INFO(getLogger("ProfilerHttp"), "return {}, type {}", resource_name, content_type); } } diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index 691987f9ae..860ee84136 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -103,7 +103,7 @@ void ReplicasStatusHandler::handleRequest(HTTPServerRequest & request, HTTPServe } catch (...) 
{ - LOG_ERROR((&Poco::Logger::get("ReplicasStatusHandler")), "Cannot send exception to client"); + LOG_ERROR((getLogger("ReplicasStatusHandler")), "Cannot send exception to client"); } } } diff --git a/src/Server/ServerHelper.cpp b/src/Server/ServerHelper.cpp index 5c6373a2e0..d42667bc34 100644 --- a/src/Server/ServerHelper.cpp +++ b/src/Server/ServerHelper.cpp @@ -18,7 +18,7 @@ std::string getCanonicalPath(std::string && path) } std::string getUserName(uid_t user_id); -void setupTmpPath(Poco::Logger * log, const std::string & path) +void setupTmpPath(LoggerPtr log, const std::string & path) { LOG_DEBUG(log, "Setting up {} to store temporary data in it", path); diff --git a/src/Server/ServerHelper.h b/src/Server/ServerHelper.h index 41d38cb4e9..2fa0130c0a 100644 --- a/src/Server/ServerHelper.h +++ b/src/Server/ServerHelper.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -17,5 +18,5 @@ std::string getCanonicalPath(std::string && path); /** * @brief Setup temporary path for later use. 
*/ -void setupTmpPath(Poco::Logger * log, const std::string & path); +void setupTmpPath(LoggerPtr log, const std::string & path); } diff --git a/src/Server/ServerPrometheusMetricsWriter.cpp b/src/Server/ServerPrometheusMetricsWriter.cpp index c71eecf333..95d9e88495 100644 --- a/src/Server/ServerPrometheusMetricsWriter.cpp +++ b/src/Server/ServerPrometheusMetricsWriter.cpp @@ -161,7 +161,7 @@ void ServerPrometheusMetricsWriter::writeConfigMetrics(WriteBuffer & wb) } else { - LOG_WARNING(&Poco::Logger::get("ServerPrometheusMetricsWriter"), "Unknown config metric found, this should never happen"); + LOG_WARNING(getLogger("ServerPrometheusMetricsWriter"), "Unknown config metric found, this should never happen"); writeOutLine(wb, key_label, 0); } } @@ -413,7 +413,7 @@ void ServerPrometheusMetricsWriter::writePartMetrics(WriteBuffer & wb) if (!cnch_catalog) { - LOG_WARNING(&Poco::Logger::get("ServerPrometheusMetricsWriter"), "Cannot get catalog for part metrics"); + LOG_WARNING(getLogger("ServerPrometheusMetricsWriter"), "Cannot get catalog for part metrics"); } else { diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index e3211a607f..fa0e67263c 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -136,7 +136,7 @@ TCPHandler::TCPHandler( : Poco::Net::TCPServerConnection(socket_) , server(server_) , parse_proxy_protocol(parse_proxy_protocol_) - , log(&Poco::Logger::get("TCPHandler")) + , log(getLogger("TCPHandler")) , connection_context(Context::createCopy(server.context())) , query_context(Context::createCopy(server.context())) , server_display_name(std::move(server_display_name_)) diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index fbbd1be7dc..90a6d2cc47 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -156,7 +157,7 @@ public: private: IServer & server; bool parse_proxy_protocol = false; - Poco::Logger * log; + LoggerPtr log; String 
client_name; UInt64 client_version_major = 0; diff --git a/src/Server/TCPHandlerFactory.h b/src/Server/TCPHandlerFactory.h index 13ef4539a8..ff6a7abc6c 100644 --- a/src/Server/TCPHandlerFactory.h +++ b/src/Server/TCPHandlerFactory.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -37,7 +38,7 @@ class TCPHandlerFactory : public Poco::Net::TCPServerConnectionFactory private: IServer & server; bool parse_proxy_protocol = false; - Poco::Logger * log; + LoggerPtr log; std::string server_display_name; class DummyTCPHandler : public Poco::Net::TCPServerConnection @@ -54,7 +55,7 @@ public: */ TCPHandlerFactory(IServer & server_, bool secure_, bool parse_proxy_protocol_) : server(server_), parse_proxy_protocol(parse_proxy_protocol_) - , log(&Poco::Logger::get(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory")) + , log(getLogger(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory")) { server_display_name = server.config().getString("display_name", getIPOrFQDNOrHostName()); } diff --git a/src/ServiceDiscovery/ServiceDiscoveryConsul.h b/src/ServiceDiscovery/ServiceDiscoveryConsul.h index 411e3c2dda..cb9f9c0a57 100644 --- a/src/ServiceDiscovery/ServiceDiscoveryConsul.h +++ b/src/ServiceDiscovery/ServiceDiscoveryConsul.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -52,7 +53,7 @@ public: static HostWithPortsVec formatResult(const Endpoints & eps, ComponentType type); private: - Poco::Logger * log = &Poco::Logger::get("ServiceDiscoveryConsul"); + LoggerPtr log = getLogger("ServiceDiscoveryConsul"); ServiceDiscoveryCache cache; bool passCheckCluster(const Endpoint & e); diff --git a/src/ServiceDiscovery/ServiceDiscoveryDNS.h b/src/ServiceDiscovery/ServiceDiscoveryDNS.h index 3b87e59d58..9b6951be2e 100644 --- a/src/ServiceDiscovery/ServiceDiscoveryDNS.h +++ b/src/ServiceDiscovery/ServiceDiscoveryDNS.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -85,7 +86,7 @@ public: private: 
std::map serviceMap; // psm -> k8s Service pair - Poco::Logger * log = &Poco::Logger::get("ServiceDiscoveryDNS"); + LoggerPtr log = getLogger("ServiceDiscoveryDNS"); std::vector generalLookup(ServicePair & service_pair, const String & port_name); diff --git a/src/Statistics/ASTHelpers.cpp b/src/Statistics/ASTHelpers.cpp index 0687af3a21..eb9c97adce 100644 --- a/src/Statistics/ASTHelpers.cpp +++ b/src/Statistics/ASTHelpers.cpp @@ -77,7 +77,7 @@ std::vector getTablesFromScope(ContextPtr context, const S mv->getTableName(), mv->getTargetDatabaseName(), mv->getTargetTableName()); - LOG_WARNING(&Poco::Logger::get("ShowStats"), err_msg); + LOG_WARNING(getLogger("ShowStats"), err_msg); continue; } table = table_opt.value(); diff --git a/src/Statistics/AutoStatisticsManager.cpp b/src/Statistics/AutoStatisticsManager.cpp index caccae515b..6a8c6d28c0 100644 --- a/src/Statistics/AutoStatisticsManager.cpp +++ b/src/Statistics/AutoStatisticsManager.cpp @@ -87,7 +87,7 @@ void AutoStatisticsManager::prepareNewConfig(const Poco::Util::AbstractConfigura AutoStatisticsManager::AutoStatisticsManager(ContextPtr context_) : WithContext(context_) - , logger(&Poco::Logger::get("AutoStatisticsManager")) + , logger(getLogger("AutoStatisticsManager")) , task_queue(context_, internal_config) , settings_manager(context_) , schedule_lease(TimePoint{}) // just make compiler happy @@ -559,7 +559,7 @@ void AutoStatisticsManager::initialize(ContextMutablePtr context_, const Poco::U } else { - LOG_WARNING(&Poco::Logger::get("AutoStatisticsManager::initialize"), "cnch_system.cnch_auto_stats_task_log is not initialized"); + LOG_WARNING(getLogger("AutoStatisticsManager::initialize"), "cnch_system.cnch_auto_stats_task_log is not initialized"); } // zk helper will make only one manager runs at the whole cluster diff --git a/src/Statistics/AutoStatisticsManager.h b/src/Statistics/AutoStatisticsManager.h index 9ac588cee8..0243cad318 100644 --- a/src/Statistics/AutoStatisticsManager.h +++ 
b/src/Statistics/AutoStatisticsManager.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -76,7 +77,7 @@ private: void logTaskIfNeeded(const StatsTableIdentifier & table, UInt64 udi_count, UInt64 stats_row_count); void createTask(const StatisticsScope & scope); - Poco::Logger * logger; + LoggerPtr logger; // we don't have lock to protect internal_config since it will be accessed only single-threaded InternalConfig internal_config; diff --git a/src/Statistics/AutoStatisticsTaskQueue.h b/src/Statistics/AutoStatisticsTaskQueue.h index 853d7d3e64..f630dd717f 100644 --- a/src/Statistics/AutoStatisticsTaskQueue.h +++ b/src/Statistics/AutoStatisticsTaskQueue.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -162,7 +163,7 @@ private: // table uuid -> task std::unordered_map> task_infos; - Poco::Logger * logger = &Poco::Logger::get("AutoStats::TaskQueue"); + LoggerPtr logger = getLogger("AutoStats::TaskQueue"); const InternalConfig & internal_config; }; diff --git a/src/Statistics/AutoStatsTaskLogHelper.cpp b/src/Statistics/AutoStatsTaskLogHelper.cpp index 8cdf76f2e4..4d7e2c98f1 100644 --- a/src/Statistics/AutoStatsTaskLogHelper.cpp +++ b/src/Statistics/AutoStatsTaskLogHelper.cpp @@ -124,7 +124,7 @@ std::vector batchReadTaskLog(ContextPtr context, DateTime64 min_eve } LOG_DEBUG( - &Poco::Logger::get("AutoStatsTaskLogHelper"), + getLogger("AutoStatsTaskLogHelper"), fmt::format(FMT_STRING("batchReadTaskLog: read {} useful entries with min_event_time='{}'"), result.size(), event_time_str)); return result; diff --git a/src/Statistics/CacheManager.cpp b/src/Statistics/CacheManager.cpp index 795941d2f6..717fd4511d 100644 --- a/src/Statistics/CacheManager.cpp +++ b/src/Statistics/CacheManager.cpp @@ -28,7 +28,7 @@ void CacheManager::initialize(ContextPtr context) { if (cache) { - LOG_WARNING(&Poco::Logger::get("CacheManager"), "CacheManager already initialized"); + LOG_WARNING(getLogger("CacheManager"), "CacheManager already initialized"); 
return; } auto max_size = context->getConfigRef().getUInt64("optimizer.statistics.max_cache_size", ConfigParameters::max_cache_size); diff --git a/src/Statistics/CollectStep.h b/src/Statistics/CollectStep.h index f806001427..c060e299d7 100644 --- a/src/Statistics/CollectStep.h +++ b/src/Statistics/CollectStep.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -94,7 +95,7 @@ protected: CatalogAdaptorPtr catalog; ContextPtr context; HandlerContext handler_context; - Poco::Logger * logger = &Poco::Logger::get("Statistics::CollectStep"); + LoggerPtr logger = getLogger("Statistics::CollectStep"); }; diff --git a/src/Statistics/FullCollectStep.cpp b/src/Statistics/FullCollectStep.cpp index 36aa189f1d..30d603b567 100644 --- a/src/Statistics/FullCollectStep.cpp +++ b/src/Statistics/FullCollectStep.cpp @@ -70,7 +70,7 @@ public: // to estimate ndv LOG_INFO( - &Poco::Logger::get("FirstFullColumnHandler"), + getLogger("FirstFullColumnHandler"), fmt::format( FMT_STRING("col info: col={} && " "sqls={}"), @@ -113,7 +113,7 @@ public: result.bucket_bounds = histogram->getBucketBounds(histogram_bucket_size); } LOG_INFO( - &Poco::Logger::get("FirstFullColumnHandler"), + getLogger("FirstFullColumnHandler"), fmt::format( FMT_STRING("col info: col={} && " "context raw data: full_count={} && " diff --git a/src/Statistics/OptimizerStatisticsClient.cpp b/src/Statistics/OptimizerStatisticsClient.cpp index 6334fca029..3bf80751f2 100644 --- a/src/Statistics/OptimizerStatisticsClient.cpp +++ b/src/Statistics/OptimizerStatisticsClient.cpp @@ -28,7 +28,7 @@ void refreshClusterStatsCache(ContextPtr context, const StatsTableIdentifier & t { Protos::RefreshStatisticsCacheRequest req; RPCHelpers::fillStorageID(table_identifier.getStorageID(), *req.add_tables()); - auto * log = &Poco::Logger::get("refreshClusterStatsCache"); + auto log = getLogger("refreshClusterStatsCache"); req.set_mode(mode); LOG_INFO(log, "refresh statistics on {}", table_identifier.getNameForLogs()); @@ 
-72,7 +72,7 @@ StatisticsSettings fetchStatisticsSettings(ContextPtr context) (void)context; throw Exception("not implemented", ErrorCodes::NOT_IMPLEMENTED); #if 0 - auto * log = &Poco::Logger::get("fetchStatisticsSettings"); + auto log = getLogger("fetchStatisticsSettings"); Protos::FetchStatisticsSettingsRequest req; auto * manager = context->getAutoStatisticsManager(); auto leader_addr = manager->getZkHelper().getLeaderAddr(); @@ -93,7 +93,7 @@ StatisticsSettings fetchStatisticsSettings(ContextPtr context) // std::map queryUdiCounter(ContextPtr context) // { // (void)context; -// auto * log = &Poco::Logger::get("queryUdiCounter"); +// auto log = getLogger("queryUdiCounter"); // std::map result; diff --git a/src/Statistics/OptimizerStatisticsService.h b/src/Statistics/OptimizerStatisticsService.h index f91b7e1d02..731442998a 100644 --- a/src/Statistics/OptimizerStatisticsService.h +++ b/src/Statistics/OptimizerStatisticsService.h @@ -1,6 +1,7 @@ #pragma once +#include #include #include #include @@ -30,7 +31,7 @@ public: google::protobuf::Closure * done) override; private: - Poco::Logger * log = &Poco::Logger::get("OptimizerStatisticsService"); + LoggerPtr log = getLogger("OptimizerStatisticsService"); }; diff --git a/src/Statistics/SampleCollectStep.cpp b/src/Statistics/SampleCollectStep.cpp index 7b742b0d2a..3b400724f6 100644 --- a/src/Statistics/SampleCollectStep.cpp +++ b/src/Statistics/SampleCollectStep.cpp @@ -86,7 +86,7 @@ public: // to estimate ndv LOG_INFO( - &Poco::Logger::get("FirstSampleColumnHandler"), + getLogger("FirstSampleColumnHandler"), fmt::format( FMT_STRING("col info: col={} && " "sqls={}"), @@ -147,7 +147,7 @@ public: auto estimated_ndv_upper_bound = scaleNdv(full_count, sample_row_count, sample_ndv_ub, block_ndv); LOG_INFO( - &Poco::Logger::get("ThirdSampleColumnHandler"), + getLogger("ThirdSampleColumnHandler"), fmt::format( FMT_STRING("estimated_ndv={}, estimated_ndv_low_bound={}, estimated_ndv_upper_bound={}"), estimated_ndv, @@ -209,7 
+209,7 @@ public: } LOG_INFO( - &Poco::Logger::get("FirstSampleColumnHandler"), + getLogger("FirstSampleColumnHandler"), fmt::format( FMT_STRING("col info: col={} && " "context raw data: full_count={}, sample_row_count={} && " @@ -513,7 +513,7 @@ public: { auto & col_data = handler_context.columns_data.at(col_desc.name); auto full_sql = constructThirdSql(handler_context.settings, table_info, col_desc, col_data.bucket_bounds, getSampleTail(true)); - LOG_INFO(&Poco::Logger::get("thirdSampleColumnHandler"), full_sql); + LOG_INFO(getLogger("thirdSampleColumnHandler"), full_sql); auto helper = SubqueryHelper::create(context, full_sql, true); Block block; diff --git a/src/Statistics/SettingsManager.cpp b/src/Statistics/SettingsManager.cpp index 2d2d889f24..7352f45c91 100644 --- a/src/Statistics/SettingsManager.cpp +++ b/src/Statistics/SettingsManager.cpp @@ -96,7 +96,7 @@ std::unordered_map parseAllKeyValue(const Poco::Util::AbstractCo return result; } -SettingsChanges mapToChanges(const std::unordered_map & kv_map, Poco::Logger * logger) +SettingsChanges mapToChanges(const std::unordered_map & kv_map, LoggerPtr logger) { SettingsChanges changes; for (const auto & [k, v] : kv_map) @@ -123,7 +123,7 @@ bool stringToBool(const std::string& str) { void SettingsManager::loadSettingsFromXml(const Poco::Util::AbstractConfiguration & config) { - auto * logger = &Poco::Logger::get("Statistics::AutoStats::SettingsManager"); + auto logger = getLogger("Statistics::AutoStats::SettingsManager"); try { bool any_is_enabled = false; diff --git a/src/Statistics/StatisticsCollector.cpp b/src/Statistics/StatisticsCollector.cpp index 4198890e89..3bbe582488 100644 --- a/src/Statistics/StatisticsCollector.cpp +++ b/src/Statistics/StatisticsCollector.cpp @@ -69,7 +69,7 @@ StatisticsCollector::StatisticsCollector( settings.set_enable_sample(false); } - logger = &Poco::Logger::get("StatisticsLogger" + table_info.getDbTableName()); + logger = getLogger("StatisticsLogger" + 
table_info.getDbTableName()); } diff --git a/src/Statistics/StatisticsCollector.h b/src/Statistics/StatisticsCollector.h index a73cd9f0bd..0e2f76025f 100644 --- a/src/Statistics/StatisticsCollector.h +++ b/src/Statistics/StatisticsCollector.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -58,7 +59,7 @@ public: private: ContextPtr context; - Poco::Logger * logger; + LoggerPtr logger; CatalogAdaptorPtr catalog; StatsTableIdentifier table_info; diff --git a/src/Statistics/SubqueryHelper.cpp b/src/Statistics/SubqueryHelper.cpp index 3f6bde5733..fe6da9c1c8 100644 --- a/src/Statistics/SubqueryHelper.cpp +++ b/src/Statistics/SubqueryHelper.cpp @@ -109,7 +109,7 @@ SubqueryHelper::SubqueryHelper(std::unique_ptr impl_) : impl(std::move SubqueryHelper SubqueryHelper::create(ContextPtr context, const String & sql, bool large_sql) { - LOG_TRACE(&Poco::Logger::get("create stats subquery"), "collect stats with sql: " + sql); + LOG_TRACE(getLogger("create stats subquery"), "collect stats with sql: " + sql); auto impl = std::make_unique(); impl->large_sql = large_sql; impl->old_context = context; diff --git a/src/Statistics/TableHandler.cpp b/src/Statistics/TableHandler.cpp index 9cb2d7c47e..3a46c7c78f 100644 --- a/src/Statistics/TableHandler.cpp +++ b/src/Statistics/TableHandler.cpp @@ -35,13 +35,13 @@ String TableHandler::getFullSql() sql_components.insert(sql_components.end(), sqls.begin(), sqls.end()); } auto full_sql = fmt::format(FMT_STRING("select {} from {}"), fmt::join(sql_components, ", "), table_identifier.getDbTableName()); - LOG_INFO(&Poco::Logger::get("TableHandler"), "full_sql={}", full_sql); + LOG_INFO(getLogger("TableHandler"), "full_sql={}", full_sql); return full_sql; } void TableHandler::parse(const Block & block) { - LOG_INFO(&Poco::Logger::get("TableHandler"), "table={}", table_identifier.getDbTableName()); + LOG_INFO(getLogger("TableHandler"), "table={}", table_identifier.getDbTableName()); if (block.columns() != column_size) { 
throw Exception("fetched block has wrong column size", ErrorCodes::LOGICAL_ERROR); diff --git a/src/Statistics/VersionHelper.cpp b/src/Statistics/VersionHelper.cpp index 8f785b05a6..07779bf5ff 100644 --- a/src/Statistics/VersionHelper.cpp +++ b/src/Statistics/VersionHelper.cpp @@ -36,7 +36,7 @@ std::optional getVersion(ContextPtr context, const StatsTableIdentif } catch (...) { - tryLogCurrentException(&Poco::Logger::get("Statistics::getVersion")); + tryLogCurrentException(getLogger("Statistics::getVersion")); return std::nullopt; } } diff --git a/src/Storages/CnchTablePartitionMetrics.cpp b/src/Storages/CnchTablePartitionMetrics.cpp index 9fd5973509..600d4370e4 100644 --- a/src/Storages/CnchTablePartitionMetrics.cpp +++ b/src/Storages/CnchTablePartitionMetrics.cpp @@ -86,7 +86,7 @@ void PartitionMetrics::restoreFromSnapshot(const PartitionMetrics & other) if (shutdown) return; - auto * log = &Poco::Logger::get("PartitionMetrics"); + auto log = getLogger("PartitionMetrics"); auto expected_value = false; if (!recalculating.compare_exchange_strong(expected_value, true)) @@ -106,7 +106,7 @@ bool PartitionMetrics::recalculate(size_t current_time, ContextPtr context, bool { if (shutdown) return false; - auto * log = &Poco::Logger::get("PartitionMetrics"); + auto log = getLogger("PartitionMetrics"); auto expected_value = false; if (!recalculating.compare_exchange_strong(expected_value, true)) @@ -216,7 +216,7 @@ void PartitionMetrics::recalculateBottomHalf(ContextPtr context) if (shutdown) return; size_t current_time = recalculate_current_time; - auto * log = &Poco::Logger::get("PartitionMetrics"); + auto log = getLogger("PartitionMetrics"); LOG_TRACE(log, "{} Recalculate bottom half.", getTraceID()); if (!old_store.has_value()) @@ -350,7 +350,7 @@ void TableMetrics::restoreFromSnapshot(Protos::TableTrashItemsMetricsSnapshot & if (shutdown) return; - auto * log = &Poco::Logger::get("TableMetrics"); + auto log = getLogger("TableMetrics"); auto expected_value = false; if 
(!recalculating.compare_exchange_strong(expected_value, true)) @@ -369,7 +369,7 @@ void TableMetrics::recalculate(size_t current_time, ContextPtr context, bool for { if (shutdown) return; - auto * log = &Poco::Logger::get("TableMetrics"); + auto log = getLogger("TableMetrics"); auto expected_value = false; if (!recalculating.compare_exchange_strong(expected_value, true)) diff --git a/src/Storages/CnchTablePartitionMetricsHelper.cpp b/src/Storages/CnchTablePartitionMetricsHelper.cpp index a48f36ad61..6aaf66dee1 100644 --- a/src/Storages/CnchTablePartitionMetricsHelper.cpp +++ b/src/Storages/CnchTablePartitionMetricsHelper.cpp @@ -15,7 +15,7 @@ namespace DB CnchTablePartitionMetricsHelper::CnchTablePartitionMetricsHelper(ContextPtr context_) - : WithContext(context_), log(&Poco::Logger::get("CnchTablePartitionMetricsHelper")) + : WithContext(context_), log(getLogger("CnchTablePartitionMetricsHelper")) { metrics_updater = getContext()->getMetricsRecalculationSchedulePool().createTask("PartMetricsUpdater", [this]() { try diff --git a/src/Storages/CnchTablePartitionMetricsHelper.h b/src/Storages/CnchTablePartitionMetricsHelper.h index a31e8ab6e6..94e82fb000 100644 --- a/src/Storages/CnchTablePartitionMetricsHelper.h +++ b/src/Storages/CnchTablePartitionMetricsHelper.h @@ -1,5 +1,6 @@ #pragma once #include +#include #include #include #include @@ -84,7 +85,7 @@ private: 1000 * getContext()->getSettingsRef().part_cache_manager_thread_pool_size}; ThreadPool & getTablePartitionThreadPool() { return table_partition_thread_pool; } - Poco::Logger * log; + LoggerPtr log; /** * @brief Trigger a recalculation of both CNCH parts and trash items diff --git a/src/Storages/DataLakes/PaimonCommon.h b/src/Storages/DataLakes/PaimonCommon.h index a8e680a77e..929c58d3ff 100644 --- a/src/Storages/DataLakes/PaimonCommon.h +++ b/src/Storages/DataLakes/PaimonCommon.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_HIVE and USE_JAVA_EXTENSIONS @@ -65,7 +66,7 @@ protected: 
std::shared_ptr jni_client; private: - Poco::Logger * log{&Poco::Logger::get("PaimonCatalogClient")}; + LoggerPtr log{getLogger("PaimonCatalogClient")}; }; class PaimonHiveCatalogClient final : public PaimonCatalogClient, public shared_ptr_helper @@ -80,7 +81,7 @@ protected: private: void parseMetastoreUrl(); - Poco::Logger * log{&Poco::Logger::get("PaimonHiveCatalogClient")}; + LoggerPtr log{getLogger("PaimonHiveCatalogClient")}; const String metastore_url; const String warehouse; @@ -97,7 +98,7 @@ protected: Poco::JSON::Object buildCatalogParams() override; private: - Poco::Logger * log{&Poco::Logger::get("PaimonHDFSCatalogClient")}; + LoggerPtr log{getLogger("PaimonHDFSCatalogClient")}; const String warehouse; }; @@ -113,7 +114,7 @@ protected: Poco::JSON::Object buildCatalogParams() override; private: - Poco::Logger * log{&Poco::Logger::get("PaimonLocalFilesystemCatalogClient")}; + LoggerPtr log{getLogger("PaimonLocalFilesystemCatalogClient")}; const String path; }; @@ -129,7 +130,7 @@ protected: Poco::JSON::Object buildCatalogParams() override; private: - Poco::Logger * log{&Poco::Logger::get("PaimonS3CatalogClient")}; + LoggerPtr log{getLogger("PaimonS3CatalogClient")}; const String warehouse; }; @@ -225,7 +226,7 @@ namespace paimon_utils private: void visitChildNode(ASTPtr & node, Predicate2RPNContext & context); - Poco::Logger * log{&Poco::Logger::get("Predicate2RPNConverter")}; + LoggerPtr log{getLogger("Predicate2RPNConverter")}; }; class PaimonSchemaConverter : WithContext @@ -242,7 +243,7 @@ namespace paimon_utils ASTPtr buildPartitionDef() const; static DataTypePtr paimonType2CHType(Protos::Paimon::Type type); - Poco::Logger * log{&Poco::Logger::get("PaimonSchemaConverter")}; + LoggerPtr log{getLogger("PaimonSchemaConverter")}; const CnchHiveSettingsPtr storage_settings; Protos::Paimon::Schema schema; diff --git a/src/Storages/DataLakes/StorageCnchLakeBase.h b/src/Storages/DataLakes/StorageCnchLakeBase.h index fd2dfc510b..3b6630d1e2 100644 --- 
a/src/Storages/DataLakes/StorageCnchLakeBase.h +++ b/src/Storages/DataLakes/StorageCnchLakeBase.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -81,6 +82,6 @@ protected: std::shared_ptr storage_settings; private: - Poco::Logger * log{&Poco::Logger::get("CnchHive")}; + LoggerPtr log{getLogger("CnchHive")}; }; } diff --git a/src/Storages/DataLakes/StorageCnchLas.h b/src/Storages/DataLakes/StorageCnchLas.h index febd89cf39..3dd54d29d1 100644 --- a/src/Storages/DataLakes/StorageCnchLas.h +++ b/src/Storages/DataLakes/StorageCnchLas.h @@ -1,4 +1,5 @@ #pragma once +#include #include "Common/config.h" #if USE_HIVE and USE_JAVA_EXTENSIONS @@ -37,7 +38,7 @@ private: Strings getHiveColumnTypes() const; JNIHiveMetastoreClient * jni_meta_client = nullptr; - Poco::Logger * log {&Poco::Logger::get("CnchLas")}; + LoggerPtr log {getLogger("CnchLas")}; }; } diff --git a/src/Storages/DataLakes/StorageCnchPaimon.h b/src/Storages/DataLakes/StorageCnchPaimon.h index ce0e9ca39b..010431f059 100644 --- a/src/Storages/DataLakes/StorageCnchPaimon.h +++ b/src/Storages/DataLakes/StorageCnchPaimon.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_HIVE and USE_JAVA_EXTENSIONS @@ -48,7 +49,7 @@ protected: unsigned num_streams) override; private: - Poco::Logger * log{&Poco::Logger::get("StoragePaimonCluster")}; + LoggerPtr log{getLogger("StoragePaimonCluster")}; PaimonCatalogClientPtr catalog_client; diff --git a/src/Storages/DiskCache/Allocator.h b/src/Storages/DiskCache/Allocator.h index cff5d93bd8..a11591e24c 100644 --- a/src/Storages/DiskCache/Allocator.h +++ b/src/Storages/DiskCache/Allocator.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -64,7 +65,7 @@ public: void flush(); private: - Poco::Logger * log = &Poco::Logger::get("BlockCacheAllocator"); + LoggerPtr log = getLogger("BlockCacheAllocator"); void flushAndReleaseRegionFromRALocked(RegionAllocator & ra, bool flushAsync); diff --git a/src/Storages/DiskCache/BigHash.h 
b/src/Storages/DiskCache/BigHash.h index 8ac14dfe7f..83a4e87e7a 100644 --- a/src/Storages/DiskCache/BigHash.h +++ b/src/Storages/DiskCache/BigHash.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -80,7 +81,7 @@ public: std::pair getRandomAlloc(Buffer & value) override; private: - Poco::Logger * log = &Poco::Logger::get("BigHash"); + LoggerPtr log = getLogger("BigHash"); STRONG_TYPEDEF(UInt32, BucketId) diff --git a/src/Storages/DiskCache/BitmapIndexDiskCacheSegment.cpp b/src/Storages/DiskCache/BitmapIndexDiskCacheSegment.cpp index c88e1afead..7fae39e707 100644 --- a/src/Storages/DiskCache/BitmapIndexDiskCacheSegment.cpp +++ b/src/Storages/DiskCache/BitmapIndexDiskCacheSegment.cpp @@ -43,7 +43,7 @@ String BitmapIndexDiskCacheSegment::getSegmentKey(const IMergeTreeDataPartPtr & void BitmapIndexDiskCacheSegment::cacheToDisk(IDiskCache & disk_cache, bool) { - Poco::Logger * log = disk_cache.getLogger(); + LoggerPtr log = disk_cache.getLogger(); auto disk = data_part->volume->getDisk(); std::unique_ptr segment_file = disk->readFile( data_path, read_settings diff --git a/src/Storages/DiskCache/BlockCache.h b/src/Storages/DiskCache/BlockCache.h index 80ca4b9a63..9fab72e538 100644 --- a/src/Storages/DiskCache/BlockCache.h +++ b/src/Storages/DiskCache/BlockCache.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -119,7 +120,7 @@ public: static constexpr UInt32 kMaxItemSize = kMinAllocAlignSize * static_cast(std::numeric_limits::max()); private: - Poco::Logger * log = &Poco::Logger::get("BlockCache"); + LoggerPtr log = getLogger("BlockCache"); static constexpr UInt32 kFormatVersion = 12; // Should be at least the next_two_pow(sizeof(EntryDesc)). 
diff --git a/src/Storages/DiskCache/Device.cpp b/src/Storages/DiskCache/Device.cpp index 430cae054e..8aa6b9f705 100644 --- a/src/Storages/DiskCache/Device.cpp +++ b/src/Storages/DiskCache/Device.cpp @@ -7,7 +7,6 @@ #include #include -#include #include #include @@ -45,7 +44,6 @@ extern const Event DiskCacheDeviceReadIOLatency; namespace DB::HybridCache { -Poco::Logger * Device::logger_{nullptr}; Device::Device(UInt64 size_, UInt32 io_align_size_, UInt32 max_write_size_) : size(size_), io_alignment_size(io_align_size_), max_write_size(max_write_size_) @@ -55,8 +53,6 @@ Device::Device(UInt64 size_, UInt32 io_align_size_, UInt32 max_write_size_) if (max_write_size % io_alignment_size != 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid max_write_size: {}, io_align_size: {}", max_write_size, io_alignment_size); - - logger_ = &Poco::Logger::get("Device"); } namespace @@ -257,7 +253,7 @@ namespace using WaiterList = folly::SafeIntrusiveList; - Poco::Logger * log = &Poco::Logger::get("AsyncIoContext"); + LoggerPtr log = getLogger("AsyncIoContext"); std::unique_ptr async_base; // Sequential id assigned to this context @@ -460,19 +456,20 @@ namespace bool result = (status == size); if (!result) - Device::logger().error( - fmt::format("[{}] IO error: {} ret={}, {}", parent.context.getName(), toString(), status, std::strerror(-status))); + LOG_ERROR(getLogger("Device"), + "[{}] IO error: {} ret={}, {}", + parent.context.getName(), toString(), status, std::strerror(-status)) ; auto cur_time = getSteadyClock(); auto delay_ms = toMillis(cur_time - start_time).count(); if (delay_ms > static_cast(kIOTimeoutMs)) - Device::logger().error(fmt::format( + LOG_ERROR(getLogger("Device"), "[{}] IO timeout {}ms (submit +{}ms comp +{}ms): {}", parent.context.getName(), delay_ms, toMillis(submit_time - start_time).count(), toMillis(cur_time - submit_time).count(), - toString())); + toString()); parent.notifyOpResult(result); return result; @@ -529,13 +526,13 @@ namespace delay_ms = 
toMillis(cur_time - comp_time).count(); if (delay_ms > static_cast(kIOTimeoutMs)) - Device::logger().error(fmt::format( + LOG_ERROR(getLogger("Device"), "[{}] IOReq timeout {}ms (comp +{}ms notify +{}ms): {}", context.getName(), delay_ms, toMillis(comp_time - start_time).count(), toMillis(cur_time - comp_time).count(), - toString())); + toString()); return result; } @@ -787,7 +784,7 @@ namespace // (e.g., recovery path, read random alloc path) sync_io_context = std::make_unique(); - Device::logger().information(fmt::format( + LOG_INFO(getLogger("Device"), "Created device with num_devices {} size {} block_size {} stripe_size {} max_write_size {} io_engine {} qdepth {}", fvec.size(), getSize(), @@ -795,7 +792,7 @@ namespace stripe_size, max_device_write_size, getIoEngineName(io_engine), - q_depth_per_context)); + q_depth_per_context); } bool FileDevice::readImpl(UInt64 offset, UInt32 size, void * value) diff --git a/src/Storages/DiskCache/Device.h b/src/Storages/DiskCache/Device.h index 9521f39673..fb1db01943 100644 --- a/src/Storages/DiskCache/Device.h +++ b/src/Storages/DiskCache/Device.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -48,16 +47,12 @@ public: UInt32 getIOAlignmentSize() const { return io_alignment_size; } - static Poco::Logger & logger(); - protected: virtual bool writeImpl(UInt64 offset, UInt32 size, const void * value) = 0; virtual bool readImpl(UInt64 offset, UInt32 size, void * value) = 0; virtual void flushImpl() = 0; private: - static Poco::Logger * logger_; - bool readInternal(UInt64 offset, UInt32 size, void * data); bool writeInternal(UInt64 offset, const UInt8 * data, size_t size); @@ -84,9 +79,4 @@ std::unique_ptr createDirectIoFileDevice( IoEngine io_engine = IoEngine::Sync, UInt32 q_depth = 0); -inline Poco::Logger & Device::logger() -{ - chassert(logger_ != nullptr); - return *logger_; -} } diff --git a/src/Storages/DiskCache/DiskCacheFactory.cpp b/src/Storages/DiskCache/DiskCacheFactory.cpp index 
071e2f55ca..ed3194eff4 100644 --- a/src/Storages/DiskCache/DiskCacheFactory.cpp +++ b/src/Storages/DiskCache/DiskCacheFactory.cpp @@ -37,7 +37,7 @@ void DiskCacheFactory::init(Context & context) /// init pool IDiskCache::init(context); - Poco::Logger * log{&Poco::Logger::get("DiskCacheFactory")}; + LoggerPtr log{getLogger("DiskCacheFactory")}; // build disk cache for each type if (config.has(DiskCacheSettings::root)) @@ -110,7 +110,7 @@ void DiskCacheFactory::shutdown() void DiskCacheFactory::addNewCache(Context & context, const std::string & cache_name, bool create_default) { - Poco::Logger * log{&Poco::Logger::get("DiskCacheFactory")}; + LoggerPtr log{getLogger("DiskCacheFactory")}; DiskCacheSettings cache_settings; auto throttler = context.getDiskCacheThrottler(); diff --git a/src/Storages/DiskCache/DiskCacheLRU.cpp b/src/Storages/DiskCache/DiskCacheLRU.cpp index 4b42bdf84e..9a19a450c9 100644 --- a/src/Storages/DiskCache/DiskCacheLRU.cpp +++ b/src/Storages/DiskCache/DiskCacheLRU.cpp @@ -528,7 +528,7 @@ DiskCacheLRU::DiskIterator::DiskIterator( , worker_per_disk(worker_per_disk_) , min_depth_parallel(min_depth_parallel_) , max_depth_parallel(max_depth_parallel_) - , log(&Poco::Logger::get(fmt::format("DiskIterator{}({})", name, disk_cache.getName()))) + , log(::getLogger(fmt::format("DiskIterator{}({})", name, disk_cache.getName()))) { } diff --git a/src/Storages/DiskCache/DiskCacheLRU.h b/src/Storages/DiskCache/DiskCacheLRU.h index e674710a94..e5f65dde0f 100644 --- a/src/Storages/DiskCache/DiskCacheLRU.h +++ b/src/Storages/DiskCache/DiskCacheLRU.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -128,7 +129,7 @@ private: std::unique_ptr pool; ExceptionHandler handler; - Poco::Logger * log; + LoggerPtr log; }; /// Load from disk when Disk cache starts up diff --git a/src/Storages/DiskCache/DiskCacheSimpleStrategy.h b/src/Storages/DiskCache/DiskCacheSimpleStrategy.h index 7e5cc27fe2..5b74174393 100644 --- 
a/src/Storages/DiskCache/DiskCacheSimpleStrategy.h +++ b/src/Storages/DiskCache/DiskCacheSimpleStrategy.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include "Storages/DiskCache/IDiskCache.h" @@ -31,7 +32,7 @@ public: : IDiskCacheStrategy(settings_) , cache_statistics(settings_.stats_bucket_size) , segment_hits_to_cache(settings_.hits_to_cache) - , logger(&Poco::Logger::get("DiskCacheSimpleStrategy")) + , logger(getLogger("DiskCacheSimpleStrategy")) { } @@ -56,7 +57,7 @@ private: CacheStatistics cache_statistics; size_t segment_hits_to_cache; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Storages/DiskCache/FiberThread.h b/src/Storages/DiskCache/FiberThread.h index ff28c5aa41..63490d7d30 100644 --- a/src/Storages/DiskCache/FiberThread.h +++ b/src/Storages/DiskCache/FiberThread.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -85,7 +86,7 @@ public: FiberThread & operator=(const FiberThread & other) = delete; private: - Poco::Logger * log = &Poco::Logger::get("FiberThread"); + LoggerPtr log = getLogger("FiberThread"); String name; diff --git a/src/Storages/DiskCache/FifoPolicy.h b/src/Storages/DiskCache/FifoPolicy.h index 476d5cfacf..8a535460e8 100644 --- a/src/Storages/DiskCache/FifoPolicy.h +++ b/src/Storages/DiskCache/FifoPolicy.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -48,7 +49,7 @@ public: } private: - Poco::Logger * log = &Poco::Logger::get("FifoPolicy"); + LoggerPtr log = getLogger("FifoPolicy"); std::deque queue; mutable TimedMutex mutex; diff --git a/src/Storages/DiskCache/FileDiskCacheSegment.cpp b/src/Storages/DiskCache/FileDiskCacheSegment.cpp index 35563a3703..8962e098c7 100644 --- a/src/Storages/DiskCache/FileDiskCacheSegment.cpp +++ b/src/Storages/DiskCache/FileDiskCacheSegment.cpp @@ -21,7 +21,7 @@ String FileDiskCacheSegment::getSegmentName() const void FileDiskCacheSegment::cacheToDisk(IDiskCache & disk_cache, bool) { - Poco::Logger * log = disk_cache.getLogger(); + 
LoggerPtr log = disk_cache.getLogger(); try { diff --git a/src/Storages/DiskCache/IDiskCache.cpp b/src/Storages/DiskCache/IDiskCache.cpp index 776c15adad..8d3d1bc6c9 100644 --- a/src/Storages/DiskCache/IDiskCache.cpp +++ b/src/Storages/DiskCache/IDiskCache.cpp @@ -62,7 +62,7 @@ void IDiskCache::init(const Context & global_context) settings.local_disk_cache_evict_thread_pool_size, settings.local_disk_cache_evict_thread_pool_size, settings.local_disk_cache_evict_thread_pool_size * 100); - + local_disk_cache_preload_thread_pool = std::make_unique( settings.cnch_parallel_preloading, settings.cnch_parallel_preloading, @@ -117,7 +117,7 @@ IDiskCache::IDiskCache( , support_multi_cache(support_multi_cache_) , type(type_) , name(name_) - , log(&Poco::Logger::get(fmt::format("DiskCache(name={})", getName()))) + , log(::getLogger(fmt::format("DiskCache(name={})", getName()))) { if (!settings.previous_disk_cache_dir.empty()) { diff --git a/src/Storages/DiskCache/IDiskCache.h b/src/Storages/DiskCache/IDiskCache.h index 3756560dc8..1a0661cfff 100644 --- a/src/Storages/DiskCache/IDiskCache.h +++ b/src/Storages/DiskCache/IDiskCache.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -106,7 +107,7 @@ public: VolumePtr getStorageVolume() const { return volume; } ThrottlerPtr getDiskCacheThrottler() const { return disk_cache_throttler; } - Poco::Logger * getLogger() const { return log; } + LoggerPtr getLogger() const { return log; } String getDataDir() const {return latest_disk_cache_dir;} virtual std::shared_ptr getMetaCache() { return shared_from_this(); } @@ -147,7 +148,7 @@ protected: IDiskCache::DataType type; String name; - Poco::Logger * log; + LoggerPtr log; private: bool scheduleCacheTask(const std::function & task); diff --git a/src/Storages/DiskCache/JobScheduler.h b/src/Storages/DiskCache/JobScheduler.h index 404a875079..994bc7d2ad 100644 --- a/src/Storages/DiskCache/JobScheduler.h +++ b/src/Storages/DiskCache/JobScheduler.h @@ -1,5 +1,6 @@ #pragma 
once +#include #include #include #include @@ -91,7 +92,7 @@ public: Stats getStats() const; private: - Poco::Logger * log = &Poco::Logger::get("JobQueue"); + LoggerPtr log = getLogger("JobQueue"); struct QueueEntry { @@ -145,7 +146,7 @@ public: std::string_view getName() const { return name; } private: - Poco::Logger * log = &Poco::Logger::get("ThreadPoolExecutor"); + LoggerPtr log = getLogger("ThreadPoolExecutor"); const std::string_view name{}; std::atomic next_queue{0}; @@ -169,7 +170,7 @@ public: private: - Poco::Logger * log = &Poco::Logger::get("ThreadPoolJobScheduler"); + LoggerPtr log = getLogger("ThreadPoolJobScheduler"); void join(); @@ -305,7 +306,7 @@ private: // Actually submit the req to the worker thread void scheduleReq(std::unique_ptr req); - Poco::Logger * log = &Poco::Logger::get("NavyRequestDispatcher"); + LoggerPtr log = getLogger("NavyRequestDispatcher"); // The parent scheduler to get completion notification JobScheduler & scheduler; @@ -366,7 +367,7 @@ private: // Return the context for the key and type FiberRequestDispatcher & getDispatcher(uint64_t keyHash, JobType type); - Poco::Logger * log = &Poco::Logger::get("NavyRequestScheduler"); + LoggerPtr log = getLogger("NavyRequestScheduler"); const size_t num_reader_threads; const size_t num_writer_threads; diff --git a/src/Storages/DiskCache/KeyIndexFileCache.cpp b/src/Storages/DiskCache/KeyIndexFileCache.cpp index c0b4ca160a..7bf98a1e76 100644 --- a/src/Storages/DiskCache/KeyIndexFileCache.cpp +++ b/src/Storages/DiskCache/KeyIndexFileCache.cpp @@ -136,7 +136,7 @@ struct KeyIndexFileCache::Rep }; KeyIndexFileCache::KeyIndexFileCache(Context & context, UInt64 max_size) - : log(&Poco::Logger::get("KeyIndexFileCache")), rep(std::make_shared(context, max_size)) + : log(getLogger("KeyIndexFileCache")), rep(std::make_shared(context, max_size)) { initCacheFromFileSystem(); LOG_INFO(log, "Successfully init KeyIndexFileCache in path {} and KeyIndexFileCache max_size is {}", rep->base_path, max_size); 
@@ -192,7 +192,7 @@ int KeyIndexFileCache::get(const IndexFile::RemoteFileInfo & file) std::shared_ptr rep_inner = rep_wp.lock(); if (!rep_inner) { - LOG_WARNING(&Poco::Logger::get("KeyIndexFileCache"), "KeyIndexFileCache has been destory."); + LOG_WARNING(getLogger("KeyIndexFileCache"), "KeyIndexFileCache has been destory."); return; } try @@ -221,7 +221,7 @@ int KeyIndexFileCache::get(const IndexFile::RemoteFileInfo & file) catch (...) { LOG_ERROR( - &Poco::Logger::get("KeyIndexFileCache"), + getLogger("KeyIndexFileCache"), "Failed to cache {} to local disks: {}", String(std::filesystem::path(file.disk->getPath()) / file.rel_path), getCurrentExceptionMessage(false)); diff --git a/src/Storages/DiskCache/KeyIndexFileCache.h b/src/Storages/DiskCache/KeyIndexFileCache.h index ef1a532a91..8796922cc8 100644 --- a/src/Storages/DiskCache/KeyIndexFileCache.h +++ b/src/Storages/DiskCache/KeyIndexFileCache.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -42,7 +43,7 @@ public: private: void initCacheFromFileSystem(); - Poco::Logger * log; + LoggerPtr log; struct Rep; std::shared_ptr rep; }; diff --git a/src/Storages/DiskCache/LruPolicy.h b/src/Storages/DiskCache/LruPolicy.h index aeab4a3ba0..83cf751b0e 100644 --- a/src/Storages/DiskCache/LruPolicy.h +++ b/src/Storages/DiskCache/LruPolicy.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -35,7 +36,7 @@ public: size_t memorySize() const override; private: - Poco::Logger * log = &Poco::Logger::get("LruPolicy"); + LoggerPtr log = getLogger("LruPolicy"); static constexpr UInt32 kInvalidIndex = 0xffffffffu; diff --git a/src/Storages/DiskCache/NvmCache.h b/src/Storages/DiskCache/NvmCache.h index ea2a78a906..2505279096 100644 --- a/src/Storages/DiskCache/NvmCache.h +++ b/src/Storages/DiskCache/NvmCache.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -108,7 +109,7 @@ public: bool recover() override; private: - Poco::Logger * log = &Poco::Logger::get("NvmCache"); + LoggerPtr 
log = getLogger("NvmCache"); struct ValidConfigTag { diff --git a/src/Storages/DiskCache/NvmCacheConfig.cpp b/src/Storages/DiskCache/NvmCacheConfig.cpp index bc1a37bcaa..4ead55fc84 100644 --- a/src/Storages/DiskCache/NvmCacheConfig.cpp +++ b/src/Storages/DiskCache/NvmCacheConfig.cpp @@ -62,7 +62,7 @@ namespace File openCacheFile(const std::string & file_name, UInt64 size, bool truncate) { - LOG_INFO(&Poco::Logger::get("NvmCacheConfig"), "create file: {} sie: {}, truncate: {}", file_name, size, truncate); + LOG_INFO(getLogger("NvmCacheConfig"), "create file: {} sie: {}, truncate: {}", file_name, size, truncate); if (file_name.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "file name is empty"); @@ -78,7 +78,7 @@ namespace { if (e.getErrno() == EINVAL) { - LOG_ERROR(&Poco::Logger::get("NvmCacheConfig"), "failed to open with o_direct, error: {}", e.what()); + LOG_ERROR(getLogger("NvmCacheConfig"), "failed to open with o_direct, error: {}", e.what()); f = File(file_name.c_str(), flags); } else @@ -100,7 +100,7 @@ namespace ErrorCodes::CANNOT_TRUNCATE_FILE); LOG_INFO( - &Poco::Logger::get("NvmCacheConfig"), + getLogger("NvmCacheConfig"), "cache file {} is ftruncated from {} bytes to {} bytes", file_name, cur_file_size, @@ -132,7 +132,7 @@ namespace catch (const ErrnoException & e) { LOG_ERROR( - &Poco::Logger::get("NvmCacheConfig"), "Exception in openCacheFile {}, error: {} errno: {}", path, e.what(), errno); + getLogger("NvmCacheConfig"), "Exception in openCacheFile {}, error: {} errno: {}", path, e.what(), errno); throw; } file_vec.push_back(std::move(f)); @@ -186,7 +186,7 @@ namespace throw std::invalid_argument("NVM cache size is not big enough"); LOG_INFO( - &Poco::Logger::get("NvmCacheConfig"), + getLogger("NvmCacheConfig"), "big_hash_starting_limit: {}, big_hash_cache_offset: {}, big_hash_cache_size: {}", big_hash_start_offset_limit, big_hash_cache_offset, @@ -222,7 +222,7 @@ namespace } block_cache_size = alignDown(block_cache_size, region_size); - 
LOG_INFO(&Poco::Logger::get("NvmCacheConfig"), "blockcache: starting offset: {}, size: {}", block_cache_offset, block_cache_size); + LOG_INFO(getLogger("NvmCacheConfig"), "blockcache: starting offset: {}, size: {}", block_cache_offset, block_cache_size); auto block_cache = std::make_unique(); block_cache->setLayout(block_cache_offset, block_cache_size, region_size); @@ -276,10 +276,10 @@ namespace UInt64 big_hash_end_offset = total_cache_size; UInt64 big_hash_start_offset = 0; - LOG_INFO(&Poco::Logger::get("NvmCacheConfig"), "metadata size: {}", metadata_size); + LOG_INFO(getLogger("NvmCacheConfig"), "metadata size: {}", metadata_size); for (size_t idx = 0; idx < config.enginesConfigs().size(); idx++) { - LOG_INFO(&Poco::Logger::get("NvmCacheConfig"), "setting up engine pair {}", idx); + LOG_INFO(getLogger("NvmCacheConfig"), "setting up engine pair {}", idx); const auto & engines_config = config.enginesConfigs()[idx]; UInt64 block_cache_size = engines_config.blockCache().getSize(); auto engine_pair_proto = std::make_unique(); @@ -294,12 +294,12 @@ namespace big_hash_end_offset, block_cache_start_offset, *engine_pair_proto); - LOG_INFO(&Poco::Logger::get("NvmCacheConfig"), "block cache size: {}", block_cache_size); + LOG_INFO(getLogger("NvmCacheConfig"), "block cache size: {}", block_cache_size); } else { big_hash_start_offset = big_hash_end_offset; - LOG_INFO(&Poco::Logger::get("NvmCacheConfig"), "-- no bighash. block cache size: {}", block_cache_size); + LOG_INFO(getLogger("NvmCacheConfig"), "-- no bighash. block cache size: {}", block_cache_size); } if (block_cache_size > 0) @@ -389,7 +389,7 @@ std::unique_ptr createNvmCache( } if (!cache->recover()) - LOG_WARNING(&Poco::Logger::get("NvmCacheConfig"), "no recovery data found. setup with clean cache."); + LOG_WARNING(getLogger("NvmCacheConfig"), "no recovery data found. 
setup with clean cache."); return cache; } diff --git a/src/Storages/DiskCache/NvmCacheConfig.h b/src/Storages/DiskCache/NvmCacheConfig.h index 26ec947108..163a64f14e 100644 --- a/src/Storages/DiskCache/NvmCacheConfig.h +++ b/src/Storages/DiskCache/NvmCacheConfig.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -201,7 +202,7 @@ public: throw Exception(ErrorCodes::BAD_ARGUMENTS, "bighash size pct should be in range of [0, 100), but {} is set", size_pct_); if (size_pct_ == 0) - LOG_INFO(&Poco::Logger::get("NvmCacheConfig"), "BigHash is not configured"); + LOG_INFO(getLogger("NvmCacheConfig"), "BigHash is not configured"); size_pct = size_pct_; small_item_max_size = small_item_max_size_; diff --git a/src/Storages/DiskCache/PartFileDiskCacheSegment.cpp b/src/Storages/DiskCache/PartFileDiskCacheSegment.cpp index b2c895ffec..fdfa5bcbca 100644 --- a/src/Storages/DiskCache/PartFileDiskCacheSegment.cpp +++ b/src/Storages/DiskCache/PartFileDiskCacheSegment.cpp @@ -114,7 +114,7 @@ String PartFileDiskCacheSegment::getMarkName() const void PartFileDiskCacheSegment::cacheToDisk(IDiskCache & disk_cache, bool throw_exception) { - Poco::Logger * log = disk_cache.getLogger(); + LoggerPtr log = disk_cache.getLogger(); try { diff --git a/src/Storages/DiskCache/RegionManager.h b/src/Storages/DiskCache/RegionManager.h index b4cdefcd7b..b07247031a 100644 --- a/src/Storages/DiskCache/RegionManager.h +++ b/src/Storages/DiskCache/RegionManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -182,7 +183,7 @@ private: // Initializes the eviction policy. 
void resetEvictionPolicy(); - Poco::Logger * log = &Poco::Logger::get("RegionManager"); + LoggerPtr log = getLogger("RegionManager"); const UInt16 num_priorities{}; const UInt16 in_mem_buf_flush_retry_limit{}; diff --git a/src/Storages/DiskCache/tests/SeqPoints.h b/src/Storages/DiskCache/tests/SeqPoints.h index 24e3487dd2..26de2cb8a6 100644 --- a/src/Storages/DiskCache/tests/SeqPoints.h +++ b/src/Storages/DiskCache/tests/SeqPoints.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -56,7 +57,7 @@ public: } private: - Poco::Logger * log = &Poco::Logger::get("SeqPoints"); + LoggerPtr log = getLogger("SeqPoints"); struct Point { diff --git a/src/Storages/DiskCache/tests/gtest_device_test.cpp b/src/Storages/DiskCache/tests/gtest_device_test.cpp index 789f0aa1e2..6a5e1ae007 100644 --- a/src/Storages/DiskCache/tests/gtest_device_test.cpp +++ b/src/Storages/DiskCache/tests/gtest_device_test.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include namespace ProfileEvents @@ -47,7 +48,7 @@ struct DeviceParamTest : public testing::TestWithParam { DeviceParamTest() : io_engine_(std::get<0>(GetParam())), q_depth_(std::get<1>(GetParam())) { - Device::logger().information(fmt::format("DeviceParamTest: ioEngine={}, qDepth={}", getIoEngineName(io_engine_), q_depth_)); + LOG_INFO(getLogger("DeviceParamTest"), "ioEngine={}, qDepth={}", getIoEngineName(io_engine_), q_depth_); } protected: diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index a83e175830..0f4178bc70 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -91,7 +91,7 @@ namespace constexpr const std::chrono::minutes decrease_error_count_period{5}; template - ConnectionPoolPtrs createPoolsForAddresses(const std::string & name, PoolFactory && factory, const Cluster::ShardsInfo & shards_info, Poco::Logger * log) + ConnectionPoolPtrs createPoolsForAddresses(const std::string & 
name, PoolFactory && factory, const Cluster::ShardsInfo & shards_info, LoggerPtr log) { ConnectionPoolPtrs pools; @@ -167,7 +167,7 @@ namespace Block block_header; }; - DistributedHeader readDistributedHeader(ReadBufferFromFile & in, Poco::Logger * log) + DistributedHeader readDistributedHeader(ReadBufferFromFile & in, LoggerPtr log) { DistributedHeader distributed_header; @@ -310,7 +310,7 @@ namespace RemoteBlockOutputStream & remote, bool compression_expected, ReadBufferFromFile & in, - Poco::Logger * log) + LoggerPtr log) { if (!remote.getHeader()) { @@ -376,7 +376,7 @@ StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor( , default_sleep_time(storage.getContext()->getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds()) , sleep_time(default_sleep_time) , max_sleep_time(storage.getContext()->getSettingsRef().distributed_directory_monitor_max_sleep_time_ms.totalMilliseconds()) - , log(&Poco::Logger::get(getLoggerName())) + , log(getLogger(getLoggerName())) , monitor_blocker(monitor_blocker_) , metric_pending_files(CurrentMetrics::DistributedFilesToInsert, 0) , metric_broken_files(CurrentMetrics::BrokenDistributedFilesToInsert, 0) @@ -924,7 +924,7 @@ public: std::unique_ptr decompressing_in; std::unique_ptr block_in; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; Block first_block; @@ -933,7 +933,7 @@ public: in = std::make_unique(file_name); decompressing_in = std::make_unique(*in); block_in = std::make_unique(*decompressing_in, DBMS_TCP_PROTOCOL_VERSION); - log = &Poco::Logger::get("DirectoryMonitorSource"); + log = getLogger("DirectoryMonitorSource"); readDistributedHeader(*in, log); diff --git a/src/Storages/Distributed/DirectoryMonitor.h b/src/Storages/Distributed/DirectoryMonitor.h index 610ae4877e..c8328fcbcb 100644 --- a/src/Storages/Distributed/DirectoryMonitor.h +++ b/src/Storages/Distributed/DirectoryMonitor.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -134,7 +135,7 @@ private: 
std::chrono::time_point last_decrease_time {std::chrono::system_clock::now()}; std::atomic quit {false}; std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; ActionBlocker & monitor_blocker; BackgroundSchedulePoolTaskHolder task_handle; diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp index f745513ff7..c1748dc9ca 100644 --- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp +++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp @@ -82,7 +82,7 @@ namespace ErrorCodes extern const int TOO_LARGE_DISTRIBUTED_DEPTH; } -static Block adoptBlock(const Block & header, const Block & block, Poco::Logger * log) +static Block adoptBlock(const Block & header, const Block & block, LoggerPtr log) { if (blocksHaveEqualStructure(header, block)) return block; @@ -99,7 +99,7 @@ static Block adoptBlock(const Block & header, const Block & block, Poco::Logger } -static void writeBlockConvert(const BlockOutputStreamPtr & out, const Block & block, size_t repeats, Poco::Logger * log) +static void writeBlockConvert(const BlockOutputStreamPtr & out, const Block & block, size_t repeats, LoggerPtr log) { Block adopted_block = adoptBlock(out->getHeader(), block, log); for (size_t i = 0; i < repeats; ++i) @@ -126,7 +126,7 @@ DistributedBlockOutputStream::DistributedBlockOutputStream( , allow_materialized(context->getSettingsRef().insert_allow_materialized_columns) , insert_timeout(insert_timeout_) , main_table(main_table_) - , log(&Poco::Logger::get("DistributedBlockOutputStream")) + , log(getLogger("DistributedBlockOutputStream")) { const auto & settings = context->getSettingsRef(); if (settings.max_distributed_depth && context->getClientInfo().distributed_depth > settings.max_distributed_depth) diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.h b/src/Storages/Distributed/DistributedBlockOutputStream.h index 0ae57ce053..a70b299662 100644 --- 
a/src/Storages/Distributed/DistributedBlockOutputStream.h +++ b/src/Storages/Distributed/DistributedBlockOutputStream.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -141,7 +142,7 @@ private: std::atomic finished_jobs_count{0}; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/DistributedDataClient.h b/src/Storages/DistributedDataClient.h index 0786f6918a..c004f4df02 100644 --- a/src/Storages/DistributedDataClient.h +++ b/src/Storages/DistributedDataClient.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -64,7 +65,7 @@ public: String remote_file_path; UInt64 remote_file_size; DataQueuePtr queue; - Poco::Logger * log = &Poco::Logger::get("StreamClientHandler"); + LoggerPtr log = getLogger("StreamClientHandler"); }; class StreamClientHandler : public brpc::StreamInputHandler @@ -98,7 +99,7 @@ private: std::shared_ptr file_reader; DistributedDataClientOption option; - Poco::Logger * log = &Poco::Logger::get("StreamClientHandler"); + LoggerPtr log = getLogger("StreamClientHandler"); }; class DistributedDataClient @@ -132,7 +133,7 @@ public: std::optional read_rate_throttler; - Poco::Logger * log = &Poco::Logger::get("DistributedDataClient"); + LoggerPtr log = getLogger("DistributedDataClient"); }; } diff --git a/src/Storages/DistributedDataCommon.cpp b/src/Storages/DistributedDataCommon.cpp index 584eb7a2d2..887502f39a 100644 --- a/src/Storages/DistributedDataCommon.cpp +++ b/src/Storages/DistributedDataCommon.cpp @@ -25,12 +25,12 @@ bool brpcWriteWithRetry(brpc::StreamId id, const butil::IOBuf & buf, int retry_c { // TODO: retain stream object before finish code is read. // Ingore error when writing to the closed stream, because this stream is closed by remote peer before read any finish code. 
- LOG_INFO(&Poco::Logger::get("DistributedDataCommon"), "Stream-{}, file info {} is closed", id, message); + LOG_INFO(getLogger("DistributedDataCommon"), "Stream-{}, file info {} is closed", id, message); return false; } LOG_TRACE( - &Poco::Logger::get("DistributedDataCommon"), + getLogger("DistributedDataCommon"), "Stream write buffer full wait for {} ms, remaining retry count-{}, stream_id-{}, {} wait res code: {} size:{} ", // todo(jiashuo): support distributed file stealing STREAM_WAIT_TIMEOUT_MS, retry_count, @@ -42,14 +42,14 @@ bool brpcWriteWithRetry(brpc::StreamId id, const butil::IOBuf & buf, int retry_c else if (rect_code == EINVAL) { // Ingore error when writing to the closed stream, because this stream is closed by remote peer before read any finish code. - LOG_INFO(&Poco::Logger::get("DistributedDataCommon"), "Stream-{} with {} is closed", id, message); + LOG_INFO(getLogger("DistributedDataCommon"), "Stream-{} with {} is closed", id, message); return false; } else if (rect_code == 1011) //EOVERCROWDED | 1011 | The server is overcrowded { bthread_usleep(1000 * 1000); LOG_WARNING( - &Poco::Logger::get("DistributedDataCommon"), + getLogger("DistributedDataCommon"), "Stream-{} write buffer error rect_code: EOVERCROWDED, server is overcrowded: {}", id, message); @@ -64,7 +64,7 @@ bool brpcWriteWithRetry(brpc::StreamId id, const butil::IOBuf & buf, int retry_c if (rc == EINVAL) return false; LOG_INFO( - &Poco::Logger::get("DistributedDataCommon"), + getLogger("DistributedDataCommon"), "Stream-{} write receive finish request, finish code: -1: {}", id, rect_code, diff --git a/src/Storages/DistributedDataService.cpp b/src/Storages/DistributedDataService.cpp index f24520c189..66aee29e68 100644 --- a/src/Storages/DistributedDataService.cpp +++ b/src/Storages/DistributedDataService.cpp @@ -141,7 +141,7 @@ int StreamServiceHandler::on_received_messages(brpc::StreamId id, butil::IOBuf * bool success = brpcWriteWithRetry(id, out.getFinishedBuf(), retry, message); 
LOG_TRACE( - &Poco::Logger::get("StreamServiceHandler"), + getLogger("StreamServiceHandler"), "Response(success = {}) read request[{}] stream_id: {}, " "offset: {}, length: {}({}), total take {}ms from first request id: {}", success, diff --git a/src/Storages/DistributedDataService.h b/src/Storages/DistributedDataService.h index 48faec6e59..23e6f1315d 100644 --- a/src/Storages/DistributedDataService.h +++ b/src/Storages/DistributedDataService.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -37,7 +38,7 @@ protected: int max_retry_count{3}; DisksMap disks; - Poco::Logger * log = &Poco::Logger::get("DistributedDataService"); + LoggerPtr log = getLogger("DistributedDataService"); }; REGISTER_SERVICE_IMPL(DistributedDataService); @@ -72,7 +73,7 @@ private: UInt64 file_size; int max_retry_count; - Poco::Logger * log = &Poco::Logger::get("StreamServiceHandler"); + LoggerPtr log = getLogger("StreamServiceHandler"); }; } diff --git a/src/Storages/HDFS/HDFSAuth.cpp b/src/Storages/HDFS/HDFSAuth.cpp index 52c19d6300..58ab4cd22d 100644 --- a/src/Storages/HDFS/HDFSAuth.cpp +++ b/src/Storages/HDFS/HDFSAuth.cpp @@ -88,9 +88,9 @@ void HDFSKrb5Params::runKinit() const { if (need_kinit) { - LOG_DEBUG(&Poco::Logger::get("HDFSClient"), "Running KerberosInit"); + LOG_DEBUG(getLogger("HDFSClient"), "Running KerberosInit"); kerberosInit(hadoop_kerberos_keytab, hadoop_kerberos_principal, hadoop_security_kerberos_ticket_cache_path, kinit_timeout); - LOG_DEBUG(&Poco::Logger::get("HDFSClient"), "Finished KerberosInit"); + LOG_DEBUG(getLogger("HDFSClient"), "Finished KerberosInit"); } } diff --git a/src/Storages/HDFS/HDFSCommon.cpp b/src/Storages/HDFS/HDFSCommon.cpp index 76ebce489a..0160212a1f 100644 --- a/src/Storages/HDFS/HDFSCommon.cpp +++ b/src/Storages/HDFS/HDFSCommon.cpp @@ -134,7 +134,7 @@ String HDFSBuilderWrapper::getKinitCmd() void HDFSBuilderWrapper::runKinit() { String cmd = getKinitCmd(); - LOG_DEBUG(&Poco::Logger::get("HDFSClient"), "running kinit: {}", 
cmd); + LOG_DEBUG(getLogger("HDFSClient"), "running kinit: {}", cmd); std::unique_lock lck(kinit_mtx); diff --git a/src/Storages/HDFS/HDFSFileSystem.cpp b/src/Storages/HDFS/HDFSFileSystem.cpp index 58dc2944cd..915b92d8b7 100644 --- a/src/Storages/HDFS/HDFSFileSystem.cpp +++ b/src/Storages/HDFS/HDFSFileSystem.cpp @@ -147,7 +147,7 @@ void setHdfsDirectConfig(HDFSBuilderPtr & builder, const String & hdfs_user, con HDFSBuilderPtr createHDFSBuilder(const Poco::URI & uri, const String & hdfs_user, const String & nnproxy) { - LOG_TRACE(&Poco::Logger::get(__func__), "params uri: {} hdfs_user: {} nnproxy: {} " , uri.toString(), hdfs_user, nnproxy); + LOG_TRACE(getLogger(__func__), "params uri: {} hdfs_user: {} nnproxy: {} " , uri.toString(), hdfs_user, nnproxy); HDFSBuilderPtr builder(hdfsNewBuilder()); if (builder == nullptr) throw Exception( @@ -182,7 +182,7 @@ HDFSBuilderPtr createHDFSBuilder(const Poco::URI & uri, const String & hdfs_user proxies_str += host_with_port.getTCPAddress() + ","; proxies.emplace_back(normalizeHost(host_with_port.getHost()), host_with_port.tcp_port); } - LOG_INFO(&Poco::Logger::get("HDFSFileSystem"), "Construct ha hdfs nn proxies {}", proxies_str); + LOG_INFO(getLogger("HDFSFileSystem"), "Construct ha hdfs nn proxies {}", proxies_str); service_name = "nnproxy"; } else if (use_ha) { @@ -192,7 +192,7 @@ HDFSBuilderPtr createHDFSBuilder(const Poco::URI & uri, const String & hdfs_user const Poco::URI proxy_uri(addr); proxies.emplace_back(normalizeHost(proxy_uri.getHost()),proxy_uri.getPort()); } - LOG_INFO(&Poco::Logger::get("HDFSFileSystem"), "Construct ha hdfs namenodes {}", nnproxy); + LOG_INFO(getLogger("HDFSFileSystem"), "Construct ha hdfs namenodes {}", nnproxy); service_name = "ha_namenodes"; } } @@ -285,7 +285,7 @@ void HDFSFileSystem::reconnect() const fs = new_fs; } - LOG_TRACE(&Poco::Logger::get("HDFSFileSystem"), "Reconnect host: {}", host.data()); + LOG_TRACE(getLogger("HDFSFileSystem"), "Reconnect host: {}", host.data()); } void 
HDFSFileSystem::reconnectIfNecessary() const @@ -991,7 +991,7 @@ HDFSBuilderPtr HDFSConnectionParams::createBuilder(const Poco::URI & uri) const // construct from uri. // uri is hdfs://host:ip/a/b or hdfs://my-hadoop/a/b - LOG_DEBUG(&Poco::Logger::get("HDFSConnectionParams"), "use nnproxy ha config: {}", toString()); + LOG_DEBUG(getLogger("HDFSConnectionParams"), "use nnproxy ha config: {}", toString()); auto raw_builder = hdfsNewBuilder(); if (raw_builder == nullptr) throw Exception("Unable to create HDFS builder, maybe hdfs3.xml missing" , ErrorCodes::BAD_ARGUMENTS); @@ -1028,13 +1028,13 @@ HDFSBuilderPtr HDFSConnectionParams::createBuilder(const Poco::URI & uri) const auto addrs_from_nnproxy = lookupAndShuffle(); if (use_nnproxy_ha) { - LOG_DEBUG(&Poco::Logger::get("HDFSConnectionParams"), "use nnproxy ha config"); + LOG_DEBUG(getLogger("HDFSConnectionParams"), "use nnproxy ha config"); setHdfsHaConfig(builder, hdfs_service, hdfs_user, addrs_from_nnproxy); return builder; } else { - LOG_DEBUG(&Poco::Logger::get("HDFSConnectionParams"), "use none nnproxy ha config"); + LOG_DEBUG(getLogger("HDFSConnectionParams"), "use none nnproxy ha config"); IpWithPort targetNode = addrs_from_nnproxy[0]; setHdfsDirectConfig(builder, hdfs_user, "hdfs://" + std::get<0>(safeNormalizeHost(targetNode.first)), targetNode.second); return builder; diff --git a/src/Storages/HDFS/ReadBufferFromByteHDFS.cpp b/src/Storages/HDFS/ReadBufferFromByteHDFS.cpp index 278b4d6849..c762b66947 100644 --- a/src/Storages/HDFS/ReadBufferFromByteHDFS.cpp +++ b/src/Storages/HDFS/ReadBufferFromByteHDFS.cpp @@ -54,7 +54,7 @@ namespace ErrorCodes static void ReadBufferFromHdfsCallBack(const hdfsEvent & event) { - // LOG_TRACE(&Poco::Logger::get("ReadBufferFromByteHDFS"), "get event {} & {}", event.eventType, event.value); + // LOG_TRACE(getLogger("ReadBufferFromByteHDFS"), "get event {} & {}", event.eventType, event.value); switch (event.eventType) { case Hdfs::Event::HDFS_EVENT_SLOWNODE: diff --git 
a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index f24992ba41..7ccfcb96d9 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -1,6 +1,7 @@ #pragma once #if !defined(ARCADIA_BUILD) +#include #include #endif @@ -54,7 +55,7 @@ private: String format_name; String compression_method; - Poco::Logger * log = &Poco::Logger::get("StorageHDFS"); + LoggerPtr log = getLogger("StorageHDFS"); }; } diff --git a/src/Storages/Hive/DirectoryLister.cpp b/src/Storages/Hive/DirectoryLister.cpp index d6a9026b4f..74421c06d8 100644 --- a/src/Storages/Hive/DirectoryLister.cpp +++ b/src/Storages/Hive/DirectoryLister.cpp @@ -63,7 +63,7 @@ DiskPtr getDiskFromURI(const String & sd_url, const ContextPtr & context, const String encoded_sd_url; Poco::URI::encode(sd_url, "", encoded_sd_url); Poco::URI uri(encoded_sd_url); - auto * log = &Poco::Logger::get(__func__); + auto log = getLogger(__func__); LOG_TRACE(log, "sd_url: {}\n encoded {}", sd_url, encoded_sd_url); const auto & scheme = uri.getScheme(); if (scheme == "hdfs") diff --git a/src/Storages/Hive/HiveBucketFilter.cpp b/src/Storages/Hive/HiveBucketFilter.cpp index 65ca2d5053..7cbf3ec4c6 100644 --- a/src/Storages/Hive/HiveBucketFilter.cpp +++ b/src/Storages/Hive/HiveBucketFilter.cpp @@ -66,7 +66,7 @@ Int64 getBuckHashCode(DataTypePtr & type, ColumnPtr & column, String & name) { DataTypePtr striped_type = type->isNullable() ? 
static_cast(type.get())->getNestedType() : type; Int64 hashcode = 0; - LOG_TRACE(&Poco::Logger::get("getBuckHashCode"), " bucket col type = {}", striped_type->getName()); + LOG_TRACE(getLogger("getBuckHashCode"), " bucket col type = {}", striped_type->getName()); if (WhichDataType(striped_type).isString()) { hashcode = hashBytes(name, 1, name.length() - 1); @@ -74,18 +74,18 @@ Int64 getBuckHashCode(DataTypePtr & type, ColumnPtr & column, String & name) else if (WhichDataType(striped_type).isInt8() || WhichDataType(striped_type).isInt16() || WhichDataType(striped_type).isInt32()) { hashcode = column->getInt(0); - LOG_TRACE(&Poco::Logger::get("getBuckHashCode"), " int bucket value = {}", hashcode); + LOG_TRACE(getLogger("getBuckHashCode"), " int bucket value = {}", hashcode); } else if (WhichDataType(striped_type).isUInt8() || WhichDataType(striped_type).isUInt16() || WhichDataType(striped_type).isUInt32()) { hashcode = column->getUInt(0); - LOG_TRACE(&Poco::Logger::get("getBuckHashCode"), " UInt bucket value = {}", hashcode); + LOG_TRACE(getLogger("getBuckHashCode"), " UInt bucket value = {}", hashcode); } else if (WhichDataType(striped_type).isUInt64()) { UInt64 bigint_value = column->getUInt(0); hashcode = ((bigint_value >> 32) ^ bigint_value); - LOG_TRACE(&Poco::Logger::get("getBuckHashCode"), "UInt64 bucket value = {}, hashcode = ", bigint_value, hashcode); + LOG_TRACE(getLogger("getBuckHashCode"), "UInt64 bucket value = {}, hashcode = ", bigint_value, hashcode); } else if (WhichDataType(striped_type).isInt64()) { @@ -93,7 +93,7 @@ Int64 getBuckHashCode(DataTypePtr & type, ColumnPtr & column, String & name) UInt64 cast_bigint_value = static_cast(bigint_value); hashcode = ((cast_bigint_value >> 32) ^ cast_bigint_value); LOG_TRACE( - &Poco::Logger::get("getBuckHashCode"), + getLogger("getBuckHashCode"), "bigint bucket value = bigintValue {}, cast_bigint_value = = {} hashcode = {}", bigint_value, cast_bigint_value, @@ -102,12 +102,12 @@ Int64 
getBuckHashCode(DataTypePtr & type, ColumnPtr & column, String & name) else if (WhichDataType(striped_type).isFloat32()) { hashcode = column->getInt(0); - LOG_TRACE(&Poco::Logger::get("getBuckHashCode"), "Float32 bucket value = {}", hashcode); + LOG_TRACE(getLogger("getBuckHashCode"), "Float32 bucket value = {}", hashcode); } else if (WhichDataType(striped_type).isDate()) { hashcode = column->getInt(0); - LOG_TRACE(&Poco::Logger::get("getBuckHashCode"), "Date bucket value = {}", hashcode); + LOG_TRACE(getLogger("getBuckHashCode"), "Date bucket value = {}", hashcode); } else { @@ -130,7 +130,7 @@ ColumnPtr createColumnWithHiveHash(Block & block, const Block & bucket_columns, auto name = column.name; LOG_TRACE( - &Poco::Logger::get("createColumnWithHiveHash"), + getLogger("createColumnWithHiveHash"), " createColumnWithHiveHash bucket_column_type type = {} col name = {}", bucket_column_type->getName(), name); @@ -149,7 +149,7 @@ ASTs extractBucketColumnExpression(const ASTs & conditions, Names bucket_columns for (const auto & condition : conditions) { - LOG_TRACE(&Poco::Logger::get("getBuckHashCode"), " condition: {}", queryToString(condition)); + LOG_TRACE(getLogger("getBuckHashCode"), " condition: {}", queryToString(condition)); const auto & ast_func = typeid_cast(condition.get()); if (!ast_func) diff --git a/src/Storages/Hive/HiveFile/IHiveFile.cpp b/src/Storages/Hive/HiveFile/IHiveFile.cpp index 733be2396f..84defbe03a 100644 --- a/src/Storages/Hive/HiveFile/IHiveFile.cpp +++ b/src/Storages/Hive/HiveFile/IHiveFile.cpp @@ -112,7 +112,7 @@ String IHiveFile::getFormatName() const std::unique_ptr IHiveFile::readFile(const ReadSettings & settings) const { - auto * log = &Poco::Logger::get(__func__); + auto log = getLogger(__func__); auto cache_strategy = DiskCacheFactory::instance().tryGet(DiskCacheType::Hive); if (cache_strategy && settings.disk_cache_mode < DiskCacheMode::SKIP_DISK_CACHE) { diff --git a/src/Storages/Hive/HiveSchemaConverter.h 
b/src/Storages/Hive/HiveSchemaConverter.h index 0854c9dfea..84bfcd0252 100644 --- a/src/Storages/Hive/HiveSchemaConverter.h +++ b/src/Storages/Hive/HiveSchemaConverter.h @@ -1,5 +1,6 @@ #pragma once +#include #include "DataTypes/IDataType.h" #include "Interpreters/Context_fwd.h" #include "Parsers/ASTCreateQuery.h" @@ -30,7 +31,7 @@ public: private: std::shared_ptr hive_table; - Poco::Logger * log{&Poco::Logger::get("HiveSchemaConverter")}; + LoggerPtr log {getLogger("HiveSchemaConverter")}; }; diff --git a/src/Storages/Hive/Metastore/HiveMetastore.cpp b/src/Storages/Hive/Metastore/HiveMetastore.cpp index 14c5ee0017..a7d1d28a5d 100644 --- a/src/Storages/Hive/Metastore/HiveMetastore.cpp +++ b/src/Storages/Hive/Metastore/HiveMetastore.cpp @@ -35,7 +35,7 @@ static const int hive_metastore_client_recv_timeout_ms = 10000; static const int hive_metastore_client_send_timeout_ms = 10000; ThriftHiveMetastoreClientPool::ThriftHiveMetastoreClientPool(ThriftHiveMetastoreClientBuilder builder_) - : PoolBase(max_hive_metastore_client_connections, &Poco::Logger::get("ThriftHiveMetastoreClientPool")), builder(builder_) + : PoolBase(max_hive_metastore_client_connections, getLogger("ThriftHiveMetastoreClientPool")), builder(builder_) { } diff --git a/src/Storages/Hive/StorageCloudHive.h b/src/Storages/Hive/StorageCloudHive.h index 75653e5871..e4b1c4625b 100644 --- a/src/Storages/Hive/StorageCloudHive.h +++ b/src/Storages/Hive/StorageCloudHive.h @@ -1,5 +1,6 @@ #pragma once +#include #include "Common/config.h" #if USE_HIVE @@ -48,7 +49,7 @@ private: HiveFiles files; std::shared_ptr storage_settings; - Poco::Logger * log {&Poco::Logger::get("CloudHive")}; + LoggerPtr log {getLogger("CloudHive")}; CacheHolderPtr cache_holder; }; diff --git a/src/Storages/Hive/StorageCnchHive.cpp b/src/Storages/Hive/StorageCnchHive.cpp index 5c6f64df5f..2788502bd0 100644 --- a/src/Storages/Hive/StorageCnchHive.cpp +++ b/src/Storages/Hive/StorageCnchHive.cpp @@ -300,7 +300,7 @@ ASTPtr 
StorageCnchHive::applyFilter( std::move(column_compressed_sizes), getInMemoryMetadataPtr(), current_info.syntax_analyzer_result->requiredSourceColumns(), - &Poco::Logger::get("OptimizerEarlyPrewherePushdown")}; + getLogger("OptimizerEarlyPrewherePushdown")}; } } else if (HiveMoveToPrewhereMethod::STATS == settings.hive_move_to_prewhere_method) diff --git a/src/Storages/Hive/StorageCnchHive.h b/src/Storages/Hive/StorageCnchHive.h index d884ab5a90..1ccc484a3f 100644 --- a/src/Storages/Hive/StorageCnchHive.h +++ b/src/Storages/Hive/StorageCnchHive.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_HIVE @@ -87,7 +88,7 @@ protected: std::exception_ptr hive_exception = nullptr; private: - Poco::Logger * log {&Poco::Logger::get("CnchHive")}; + LoggerPtr log {getLogger("CnchHive")}; }; } diff --git a/src/Storages/Hive/StorageHiveSource.h b/src/Storages/Hive/StorageHiveSource.h index d4be8bd0e2..9575f8f263 100644 --- a/src/Storages/Hive/StorageHiveSource.h +++ b/src/Storages/Hive/StorageHiveSource.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "Common/config.h" #if USE_HIVE @@ -75,7 +76,7 @@ private: std::shared_ptr read_params; std::unique_ptr pipeline; std::unique_ptr reader; - Poco::Logger * log {&Poco::Logger::get("StorageHiveSource")}; + LoggerPtr log {getLogger("StorageHiveSource")}; }; } diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 09b24a2efe..c7231c6ab6 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -656,11 +657,6 @@ public: /// We do not use mutex because it is not very important that the size could change during the operation. virtual void checkPartitionCanBeDropped(const ASTPtr & /*partition*/) {} - virtual Poco::Logger* getLogger() const - { - throw Exception("Method getLogger is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); - } - /// Returns true if Storage may store some data on disk. 
/// NOTE: may not be equivalent to !getDataPaths().empty() virtual bool storesDataOnDisk() const { return false; } diff --git a/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.cpp b/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.cpp index afbe313c18..10a71370b1 100644 --- a/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.cpp +++ b/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.cpp @@ -77,7 +77,7 @@ IngestColumnBlockInputStream::IngestColumnBlockInputStream( ) : target_storage{std::move(target_storage_)}, context(std::move(local_context)), - log(target_storage->getLogger()) + log(getLogger(target_storage->getStorageID().getNameForLogs() + " (IngestColumn)")) { source_storage = context->tryGetCnchWorkerResource()->getTable(StorageID{command.from_database, command.from_table}); @@ -169,7 +169,7 @@ Block IngestColumnBlockInputStream::readImpl() if (getCurrentVisibleSourceParts().empty()) continue; - + MemoryEfficientIngestColumn executor{*this}; executor.execute(); } diff --git a/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.h b/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.h index cc0fd7c4f4..ae75f3562c 100644 --- a/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.h +++ b/src/Storages/IngestColumnCnch/IngestColumnBlockInputStream.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -52,7 +53,7 @@ private: Int64 cur_bucket_index = -1; // if cur_bcuket_index = -1 use ordinary ingest std::vector buckets_for_ingest; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/IngestColumnCnch/IngestColumnCnch.cpp b/src/Storages/IngestColumnCnch/IngestColumnCnch.cpp index 4e9b27bad9..0d7f135f3b 100644 --- a/src/Storages/IngestColumnCnch/IngestColumnCnch.cpp +++ b/src/Storages/IngestColumnCnch/IngestColumnCnch.cpp @@ -150,7 +150,7 @@ BlockInputStreamPtr forwardIngestPartitionToWorkerWithBucketTableImpl( const PartitionCommand & command, const 
WorkerGroupHandle & worker_group, const ContextPtr & context, - Poco::Logger * log) + LoggerPtr log) { auto workers_index_with_task = getIngestWorkersIndexAndJobsWithOrder( worker_group->getMetrics().worker_metrics_vec, context->getSettingsRef().max_ingest_task_on_workers); @@ -211,7 +211,7 @@ BlockInputStreamPtr forwardIngestPartitionToWorker( if (auto sleep_ms = context->getSettingsRef().sleep_in_send_ingest_to_worker_ms.totalMilliseconds()) sleepForMilliseconds(sleep_ms); - Poco::Logger * log = target_table.getLogger(); + LoggerPtr log = target_table.getLogger(); TxnTimestamp txn_id = context->getCurrentTransactionID(); std::hash hasher; const String transaction_string = toString(txn_id.toUInt64()); @@ -272,7 +272,7 @@ Pipe ingestPartitionInServer( const struct PartitionCommand & command, ContextPtr local_context) { - Poco::Logger * log = storage.getLogger(); + LoggerPtr log = storage.getLogger(); LOG_DEBUG(log, "execute ingest partition in server"); StorageMetadataPtr target_meta_data_ptr = storage.getInMemoryMetadataPtr(); const Names & column_names = command.column_names; diff --git a/src/Storages/IngestColumnCnch/IngestColumnHelper.cpp b/src/Storages/IngestColumnCnch/IngestColumnHelper.cpp index a1a4fe8c19..e6a4ca68cc 100644 --- a/src/Storages/IngestColumnCnch/IngestColumnHelper.cpp +++ b/src/Storages/IngestColumnCnch/IngestColumnHelper.cpp @@ -400,12 +400,12 @@ clusterDataPartWithBucketTable(const StorageCloudMergeTree & table, const IMerge for (const auto& data_part : data_parts) { LOG_TRACE( - &Poco::Logger::get("clusterDataPartWithBucketTable"), + getLogger("clusterDataPartWithBucketTable"), data_part->name + " bucket_number:" + std::to_string(data_part->bucket_number)); if (data_part->table_definition_hash != table_definition_hash.getDeterminHash()) { - LOG_DEBUG(&Poco::Logger::get("clusterDataPartWithBucketTable"), "data part not match current cluster by definition"); + LOG_DEBUG(getLogger("clusterDataPartWithBucketTable"), "data part not match current 
cluster by definition"); return {}; } if (data_part->bucket_number >= 0 && static_cast(data_part->bucket_number) < data_part_with_bucket.size()) diff --git a/src/Storages/IngestColumnCnch/memoryEfficientIngestColumn.h b/src/Storages/IngestColumnCnch/memoryEfficientIngestColumn.h index 8a9bc31b73..803656c160 100644 --- a/src/Storages/IngestColumnCnch/memoryEfficientIngestColumn.h +++ b/src/Storages/IngestColumnCnch/memoryEfficientIngestColumn.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -72,7 +73,7 @@ private: const MergeTreeDataPartsVector & visible_target_parts; const MergeTreeDataPartsVector & visible_source_parts; const size_t number_of_threads_for_read_source_parts; - Poco::Logger * log; + LoggerPtr log; /// Below is intermediate data to serve the algorithm /// Maping each part to an interger/ part_id diff --git a/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.cpp b/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.cpp index c3afce35ad..14d211de99 100644 --- a/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.cpp +++ b/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.cpp @@ -428,7 +428,7 @@ void writeBlock( const size_t number_of_buckets, IBlockOutputStream & new_part_output, const StorageMetadataPtr & target_meta_data_ptr, - Poco::Logger * log) + LoggerPtr log) { Arena temporary_keys_pool; StringRefHash hasher; diff --git a/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.h b/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.h index 7477c01c31..bec0bf08f8 100644 --- a/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.h +++ b/src/Storages/IngestColumnCnch/memoryEfficientIngestColumnHelper.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -146,7 +147,7 @@ void writeBlock( const size_t number_of_buckets, IBlockOutputStream & new_part_output, const StorageMetadataPtr & target_meta_data_ptr, - Poco::Logger * log); + 
LoggerPtr log); Names getColumnsFromSourceTableForInsertNewPart( const Names & ordered_key_names, diff --git a/src/Storages/IngestPartition.cpp b/src/Storages/IngestPartition.cpp index 78a0a9ef2c..8d9ff1f36a 100644 --- a/src/Storages/IngestPartition.cpp +++ b/src/Storages/IngestPartition.cpp @@ -350,7 +350,7 @@ IngestPartition::IngestPartition( , key_names(key_names_) , mutation(mutation_) , context(context_) - , log(&Poco::Logger::get("IngestPartition")) {} + , log(getLogger("IngestPartition")) {} /*** @@ -705,7 +705,7 @@ MergeTreeData::MutableDataPartPtr IngestPartition::ingestPart(MergeTreeData & da auto future_part = ingest_part->future_part; const auto & target_part = future_part.parts[0]; - LOG_TRACE(&Poco::Logger::get("Ingestion"), "Begin ingest part {}", target_part->name); + LOG_TRACE(getLogger("Ingestion"), "Begin ingest part {}", target_part->name); bool is_wide_part = isWidePart(target_part); @@ -781,7 +781,7 @@ MergeTreeData::MutableDataPartPtr IngestPartition::ingestPart(MergeTreeData & da check_cached_cancel); } - LOG_TRACE(&Poco::Logger::get("Ingestion"), "End ingest part {}", future_part.name); + LOG_TRACE(getLogger("Ingestion"), "End ingest part {}", future_part.name); return new_data_part; } @@ -792,11 +792,11 @@ void IngestPartition::ingestion(MergeTreeData & data, const IngestParts & parts_ { if (src_blocks.empty()) { - LOG_TRACE(&Poco::Logger::get("VirtualColumnUtils"), "Read source block is empty, skip ingestion"); + LOG_TRACE(getLogger("VirtualColumnUtils"), "Read source block is empty, skip ingestion"); return; } - LOG_TRACE(&Poco::Logger::get("Ingestion"), "Ingestion task with parts {}, source_blocks {}, ingest_column_name {}, ordered_key_names {}, all_columns {}", + LOG_TRACE(getLogger("Ingestion"), "Ingestion task with parts {}, source_blocks {}, ingest_column_name {}, ordered_key_names {}, all_columns {}", parts_to_ingest.size(), src_blocks.size(), ingest_column_names.size(), diff --git a/src/Storages/IngestPartition.h 
b/src/Storages/IngestPartition.h index c7307d4d58..d6233a23ac 100644 --- a/src/Storages/IngestPartition.h +++ b/src/Storages/IngestPartition.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -138,7 +139,7 @@ private: Names key_names; Int64 mutation = 0; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/Kafka/CnchKafkaConsumerScheduler.cpp b/src/Storages/Kafka/CnchKafkaConsumerScheduler.cpp index 5726857c11..a20d39cdba 100644 --- a/src/Storages/Kafka/CnchKafkaConsumerScheduler.cpp +++ b/src/Storages/Kafka/CnchKafkaConsumerScheduler.cpp @@ -27,7 +27,7 @@ namespace DB IKafkaConsumerScheduler::IKafkaConsumerScheduler(const String &vw_name_, const KafkaConsumerScheduleMode schedule_mode_, ContextPtr context_) : vw_name(std::move(vw_name_)), schedule_mode(schedule_mode_), global_context(context_->getGlobalContext()), - log(&Poco::Logger::get("KafkaConsumer" + String(getScheduleModeName()) + "Scheduler")) + log(getLogger("KafkaConsumer" + String(getScheduleModeName()) + "Scheduler")) { initOrUpdateWorkerPool(); } diff --git a/src/Storages/Kafka/CnchKafkaConsumerScheduler.h b/src/Storages/Kafka/CnchKafkaConsumerScheduler.h index 4f4fa40a1f..3deb1cb179 100644 --- a/src/Storages/Kafka/CnchKafkaConsumerScheduler.h +++ b/src/Storages/Kafka/CnchKafkaConsumerScheduler.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #if USE_RDKAFKA @@ -77,7 +78,7 @@ protected: VirtualWarehouseHandle vw_handle; KafkaConsumerScheduleMode schedule_mode; ContextPtr global_context; - Poco::Logger * log; + LoggerPtr log; const time_t min_running_time_for_reschedule{60}; }; diff --git a/src/Storages/Kafka/CnchKafkaOffsetManager.cpp b/src/Storages/Kafka/CnchKafkaOffsetManager.cpp index 0971787244..02bf1f6ff1 100644 --- a/src/Storages/Kafka/CnchKafkaOffsetManager.cpp +++ b/src/Storages/Kafka/CnchKafkaOffsetManager.cpp @@ -38,7 +38,7 @@ constexpr auto RESET_CONSUME_OFFSET_BREAK_TIME = 1; CnchKafkaOffsetManager::CnchKafkaOffsetManager(const 
StorageID & storage_id, ContextMutablePtr context_) : WithMutableContext(context_->getGlobalContext()) - , log(&Poco::Logger::get(storage_id.getFullTableName() + " (CnchKafkaOffsetManager)")) + , log(getLogger(storage_id.getFullTableName() + " (CnchKafkaOffsetManager)")) { storage = getContext()->getCnchCatalog()->getTable(*getContext(), storage_id.database_name, storage_id.table_name, getContext()->getTimestamp()); kafka_table = dynamic_cast(storage.get()); diff --git a/src/Storages/Kafka/CnchKafkaOffsetManager.h b/src/Storages/Kafka/CnchKafkaOffsetManager.h index a6534ecf96..20b28e4196 100644 --- a/src/Storages/Kafka/CnchKafkaOffsetManager.h +++ b/src/Storages/Kafka/CnchKafkaOffsetManager.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #if USE_RDKAFKA @@ -57,7 +58,7 @@ private: StoragePtr storage = nullptr; /// Used to ensure the life cycle StorageCnchKafka * kafka_table = nullptr; - Poco::Logger * log; + LoggerPtr log; }; using CnchKafkaOffsetManagerPtr = std::shared_ptr; diff --git a/src/Storages/Kafka/CnchReadBufferFromKafkaConsumer.h b/src/Storages/Kafka/CnchReadBufferFromKafkaConsumer.h index 87ff92b46b..bcb6648d68 100644 --- a/src/Storages/Kafka/CnchReadBufferFromKafkaConsumer.h +++ b/src/Storages/Kafka/CnchReadBufferFromKafkaConsumer.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #if USE_RDKAFKA @@ -65,7 +66,7 @@ public: bool enable_skip_offsets_hole_) : ReadBuffer(nullptr, 0) , consumer(consumer_) - , log(&Poco::Logger::get(logger_name)) + , log(getLogger(logger_name)) , batch_size(max_batch_size) , poll_timeout(poll_timeout_) , expire_timeout(expire_timeout_) @@ -115,7 +116,7 @@ public: private: ConsumerPtr consumer; - Poco::Logger * log; + LoggerPtr log; size_t batch_size; size_t poll_timeout; size_t expire_timeout; diff --git a/src/Storages/Kafka/IStorageCnchKafka.cpp b/src/Storages/Kafka/IStorageCnchKafka.cpp index 3f68c320b3..db1767ad2c 100644 --- a/src/Storages/Kafka/IStorageCnchKafka.cpp +++ b/src/Storages/Kafka/IStorageCnchKafka.cpp 
@@ -118,7 +118,7 @@ void IStorageCnchKafka::checkAndLoadingSettings(KafkaSettings &kafka_settings) { /// TODO: should we throw an Exception later to make a powerful restriction? if (kafka_settings.unique_group_prefix.value.empty()) - LOG_WARNING(&Poco::Logger::get("IStorageCnchKafka"), "No unique prefix set for tob kafka, which may cause duplicate keys for offset in bytekv"); + LOG_WARNING(getLogger("IStorageCnchKafka"), "No unique prefix set for tob kafka, which may cause duplicate keys for offset in bytekv"); } /// Use global schema_registry_url if the user does not set the table level setting parameter diff --git a/src/Storages/Kafka/KafkaCommon.cpp b/src/Storages/Kafka/KafkaCommon.cpp index fa4bbc4d0b..ac849a18ae 100644 --- a/src/Storages/Kafka/KafkaCommon.cpp +++ b/src/Storages/Kafka/KafkaCommon.cpp @@ -56,7 +56,7 @@ cppkafka::Configuration createConsumerConfiguration( ContextPtr context, const StorageID & storage_id, const Names & topics, const KafkaSettings & settings) { cppkafka::Configuration conf; - const auto log = &Poco::Logger::get(storage_id.getNameForLogs()); + const auto log = getLogger(storage_id.getNameForLogs()); /// 1) set from global configuration diff --git a/src/Storages/Kafka/StorageCloudKafka.cpp b/src/Storages/Kafka/StorageCloudKafka.cpp index a35dd00165..986a1cf6bb 100644 --- a/src/Storages/Kafka/StorageCloudKafka.cpp +++ b/src/Storages/Kafka/StorageCloudKafka.cpp @@ -77,7 +77,7 @@ StorageCloudKafka::StorageCloudKafka : IStorageCnchKafka(table_id_, context_, setting_changes_, settings_, columns_, constraints_), settings_adjustments(createSettingsAdjustments()), server_client_address(HostWithPorts::fromRPCAddress(addBracketsIfIpv6(server_client_host_) + ':' + toString(server_client_rpc_port_))), - log(&Poco::Logger::get(table_id_.getNameForLogs() + " (StorageCloudKafka)")), + log(getLogger(table_id_.getNameForLogs() + " (StorageCloudKafka)")), ////check_staged_area_task(context_->getCheckStagedAreaSchedulePool().createTask(log->name(), 
[this] { checkStagedArea(); })), check_staged_area_reschedule_ms(CHECK_STAGED_AREA_RESCHEDULE_MIN_MS) { @@ -464,7 +464,7 @@ void StorageCloudKafka::streamThread() ThreadFromGlobalPool([c = getContext(), db = database, tb = table] { try { - LOG_DEBUG(&Poco::Logger::get("SelfDropKafkaTable"), "Self-drop table: {}.{}", db, tb); + LOG_DEBUG(getLogger("SelfDropKafkaTable"), "Self-drop table: {}.{}", db, tb); /// Copy context in case the global_context would be invalid if the consumer is dropped auto drop_context = Context::createCopy(c); dropConsumerTables(drop_context, db, tb); @@ -538,7 +538,7 @@ bool StorageCloudKafka::streamToViews(/* required_column_names */) auto txn = std::make_shared(consume_context, server_client, table_id, assigned_consumer_index); if (number_tables_to_write > 1) { - LOG_DEBUG(&Poco::Logger::get("CnchKafkaWorker"), "Enable explicit commit txn while consumer needs to write {} tables", number_tables_to_write); + LOG_DEBUG(getLogger("CnchKafkaWorker"), "Enable explicit commit txn while consumer needs to write {} tables", number_tables_to_write); txn->enableExplicitCommit(); txn->setExplicitCommitStorageID(getStorageID()); } @@ -937,7 +937,7 @@ void dropConsumerTables(ContextMutablePtr context, const String & db_name, const auto dependencies = DatabaseCatalog::instance().getDependencies({db_name, tb_name}); if (dependencies.empty()) { - LOG_DEBUG(&Poco::Logger::get("CnchKafkaWorker"), "No dependencies found for " + db_name + "." + tb_name); + LOG_DEBUG(getLogger("CnchKafkaWorker"), "No dependencies found for " + db_name + "." + tb_name); tables_to_drop.emplace(backQuoteIfNeed(db_name) + "." 
+ backQuoteIfNeed(tb_name)); } else @@ -953,7 +953,7 @@ void dropConsumerTables(ContextMutablePtr context, const String & db_name, const for (const auto & table_to_drop : tables_to_drop) { String drop_table_command = "DROP TABLE IF EXISTS " + table_to_drop; - LOG_DEBUG(&Poco::Logger::get("CnchKafkaWorker"), "DROP table : {}", drop_table_command); + LOG_DEBUG(getLogger("CnchKafkaWorker"), "DROP table : {}", drop_table_command); try { @@ -978,14 +978,14 @@ void createConsumerTables(const std::vector & create_table_commands, Con ParserCreateQuery parser; for (const auto & cmd : create_table_commands) { - LOG_DEBUG(&Poco::Logger::get("CnchKafkaWorker"), "CREATE local table: {}", cmd); + LOG_DEBUG(getLogger("CnchKafkaWorker"), "CREATE local table: {}", cmd); ASTPtr ast = parseQuery(parser, cmd, global_context->getSettings().max_query_size, global_context->getSettings().max_parser_depth); InterpreterCreateQuery interpreter_tb(ast, create_context); interpreter_tb.execute(); } - LOG_DEBUG(&Poco::Logger::get("CnchKafkaWorker"), "CREATE local tables on worker successfully"); + LOG_DEBUG(getLogger("CnchKafkaWorker"), "CREATE local tables on worker successfully"); } void executeKafkaConsumeTaskImpl(const KafkaTaskCommand & command, ContextMutablePtr context) @@ -1057,7 +1057,7 @@ void executeKafkaConsumeTask(const KafkaTaskCommand & command, ContextMutablePtr /// 2. 
Try to drop created local tables which can help trigger re-scheduling of ConsumeManager faster if (command.type == KafkaTaskCommand::Type::START_CONSUME) { - LOG_INFO(&Poco::Logger::get("KafkaConsumerTaskExecutor"), "Failed to execute START_CONSUME task, try to drop local tables"); + LOG_INFO(getLogger("KafkaConsumerTaskExecutor"), "Failed to execute START_CONSUME task, try to drop local tables"); try { dropConsumerTables(context, command.local_database_name, command.local_table_name); diff --git a/src/Storages/Kafka/StorageCloudKafka.h b/src/Storages/Kafka/StorageCloudKafka.h index e1b8c6130f..b59d4e0c4f 100644 --- a/src/Storages/Kafka/StorageCloudKafka.h +++ b/src/Storages/Kafka/StorageCloudKafka.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #if USE_RDKAFKA @@ -89,7 +90,7 @@ private: /// store server client info as global_context won't have it HostWithPorts server_client_address; - Poco::Logger * log; + LoggerPtr log; mutable std::mutex last_exception_mutex; String last_exception; UInt64 rdkafka_exception_times{0}; diff --git a/src/Storages/Kafka/StorageCnchKafka.cpp b/src/Storages/Kafka/StorageCnchKafka.cpp index 7e1ed20dd3..105539a9b3 100644 --- a/src/Storages/Kafka/StorageCnchKafka.cpp +++ b/src/Storages/Kafka/StorageCnchKafka.cpp @@ -60,7 +60,7 @@ StorageCnchKafka::StorageCnchKafka( const ASTPtr setting_changes_, const KafkaSettings & settings_) : IStorageCnchKafka(table_id_, context_, setting_changes_, settings_, columns_, constraints_), - log(&Poco::Logger::get(table_id_.getNameForLogs() + " (StorageCnchKafka)")) + log(getLogger(table_id_.getNameForLogs() + " (StorageCnchKafka)")) { } diff --git a/src/Storages/Kafka/StorageCnchKafka.h b/src/Storages/Kafka/StorageCnchKafka.h index 3eccb95ce7..6c197397cc 100644 --- a/src/Storages/Kafka/StorageCnchKafka.h +++ b/src/Storages/Kafka/StorageCnchKafka.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #if USE_RDKAFKA @@ -56,7 +57,7 @@ public: private: std::set sample_consuming_partitions_list; - 
Poco::Logger * log; + LoggerPtr log; protected: StorageCnchKafka( diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index 0dd4f27b4b..d7897bc730 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -259,7 +259,7 @@ StorageLiveView::StorageLiveView( live_view_context = Context::createCopy(getContext()); live_view_context->makeQueryContext(); - log = &Poco::Logger::get("StorageLiveView (" + table_id_.database_name + "." + table_id_.table_name + ")"); + log = getLogger("StorageLiveView (" + table_id_.database_name + "." + table_id_.table_name + ")"); StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/LiveView/StorageLiveView.h b/src/Storages/LiveView/StorageLiveView.h index deaf74bed4..7fcab73cda 100644 --- a/src/Storages/LiveView/StorageLiveView.h +++ b/src/Storages/LiveView/StorageLiveView.h @@ -11,6 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include #include #include #include @@ -193,7 +194,7 @@ private: ASTPtr inner_blocks_query; /// query over the mergeable blocks to produce final result ContextMutablePtr live_view_context; - Poco::Logger * log; + LoggerPtr log; bool is_temporary = false; bool is_periodically_refreshed = false; diff --git a/src/Storages/MaterializedView/PartitionTransformer.h b/src/Storages/MaterializedView/PartitionTransformer.h index 9d06cc8bc6..ffa899462c 100644 --- a/src/Storages/MaterializedView/PartitionTransformer.h +++ b/src/Storages/MaterializedView/PartitionTransformer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -67,7 +68,7 @@ class PartitionTransformer { public: explicit PartitionTransformer(ASTPtr mv_query_, const StorageID & target_id, bool async_materialized_view_) - : mv_query(mv_query_), target_table_id(target_id), async_materialized_view(async_materialized_view_), log(&Poco::Logger::get("PartitionTransformer")) {} + : mv_query(mv_query_), target_table_id(target_id), async_materialized_view(async_materialized_view_), log(getLogger("PartitionTransformer")) {} void validate(ContextMutablePtr local_context); void validate(ContextMutablePtr local_context, MaterializedViewStructurePtr structure); @@ -101,7 +102,7 @@ private: /// all base table std::unordered_set base_tables; - Poco::Logger * log; + LoggerPtr log; /// matererialized view sql is always non partition based bool always_non_partition_based = true; diff --git a/src/Storages/MergeTree/CloudMergeTreeBlockOutputStream.h b/src/Storages/MergeTree/CloudMergeTreeBlockOutputStream.h index d56ab456e7..1fd395bca4 100644 --- a/src/Storages/MergeTree/CloudMergeTreeBlockOutputStream.h +++ b/src/Storages/MergeTree/CloudMergeTreeBlockOutputStream.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -59,7 +60,7 @@ private: void checkAndInit(); MergeTreeMetaBase & storage; - Poco::Logger * log; + LoggerPtr log; StorageMetadataPtr metadata_snapshot; ContextPtr context; diff 
--git a/src/Storages/MergeTree/CnchAttachProcessor.cpp b/src/Storages/MergeTree/CnchAttachProcessor.cpp index fd2dd5902b..b5c680d5b2 100644 --- a/src/Storages/MergeTree/CnchAttachProcessor.cpp +++ b/src/Storages/MergeTree/CnchAttachProcessor.cpp @@ -448,7 +448,7 @@ String CnchAttachProcessor::relativePathTo(const String& from, const String& to) relative_path.pushDirectory(to_path[i]); } - LOG_TRACE(&Poco::Logger::get("RelativePath"), fmt::format("From {}, to {}, rel {}", from, to, relative_path.toString())); + LOG_TRACE(getLogger("RelativePath"), fmt::format("From {}, to {}, rel {}", from, to, relative_path.toString())); return relative_path.toString(); } diff --git a/src/Storages/MergeTree/CnchAttachProcessor.h b/src/Storages/MergeTree/CnchAttachProcessor.h index 3cacc1ff91..38f3831fd8 100644 --- a/src/Storages/MergeTree/CnchAttachProcessor.h +++ b/src/Storages/MergeTree/CnchAttachProcessor.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -118,7 +119,7 @@ public: std::map rename_map; }; - AttachContext(const Context& qctx, int pool_expand_thres, int max_thds, Poco::Logger* log): + AttachContext(const Context& qctx, int pool_expand_thres, int max_thds, LoggerPtr log): query_ctx(qctx), expand_thread_pool_threshold(pool_expand_thres), max_worker_threads(max_thds), new_txn(nullptr), logger(log) {} @@ -162,7 +163,7 @@ private: std::map resources; std::map meta_files_to_delete; - Poco::Logger* logger; + LoggerPtr logger; }; // Attach will follow such process @@ -180,7 +181,7 @@ public: target_tbl(tbl), from_storage(nullptr), is_unique_tbl(tbl.getInMemoryMetadataPtr()->hasUniqueKey()), command(cmd), query_ctx(ctx), - logger(&Poco::Logger::get("CnchAttachProcessor")) {} + logger(getLogger("CnchAttachProcessor")) {} void exec(); @@ -247,7 +248,7 @@ private: const PartitionCommand& command; ContextMutablePtr query_ctx; - Poco::Logger* logger; + LoggerPtr logger; // For unique table std::mutex unique_table_info_mutex; diff --git 
a/src/Storages/MergeTree/DanceMergeSelector.cpp b/src/Storages/MergeTree/DanceMergeSelector.cpp index 703e3c3ea5..afce363886 100644 --- a/src/Storages/MergeTree/DanceMergeSelector.cpp +++ b/src/Storages/MergeTree/DanceMergeSelector.cpp @@ -73,7 +73,7 @@ IMergeSelector::PartsRange DanceMergeSelector::select(const PartsRanges & partit { if (settings.enable_batch_select) { - LOG_ERROR(&Poco::Logger::get("DanceMergeSelector"), "Calling select() with enable_batch_select=1 is not supported!"); + LOG_ERROR(getLogger("DanceMergeSelector"), "Calling select() with enable_batch_select=1 is not supported!"); return {}; } diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index efc36f04d8..544c574c07 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -118,7 +118,7 @@ struct ReplicatedFetchReadCallback Service::Service(MergeTreeData & data_, const StoragePtr & storage_) - : data(data_), storage(storage_), log(&Poco::Logger::get(data.getLogName() + " (Replicated PartsService)")) + : data(data_), storage(storage_), log(getLogger(data.getLogName() + " (Replicated PartsService)")) { } diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index ce2b84469b..5c40b50297 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -83,7 +84,7 @@ private: /// so Service will never access dangling reference to storage MergeTreeData & data; std::weak_ptr storage; - Poco::Logger * log; + LoggerPtr log; }; /** Client for getting the parts from the table *MergeTree. 
@@ -91,7 +92,7 @@ private: class Fetcher final : private boost::noncopyable { public: - explicit Fetcher(MergeTreeData & data_) : data(data_), log(&Poco::Logger::get("Fetcher")) {} + explicit Fetcher(MergeTreeData & data_) : data(data_), log(getLogger("Fetcher")) {} /// Downloads a part to tmp_directory. If to_detached - downloads to the `detached` directory. MergeTreeData::MutableDataPartPtr fetchPart( @@ -172,7 +173,7 @@ private: ThrottlerPtr throttler); MergeTreeData & data; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp index 1f194092f5..c3d28e544d 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp @@ -108,7 +108,7 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions( Coordination::Error rc = zookeeper->tryMulti(lock_ops, lock_responses); if (rc == Coordination::Error::ZBADVERSION) { - LOG_TRACE(&Poco::Logger::get("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. Retry."); + LOG_TRACE(getLogger("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. 
Retry."); continue; } else if (rc != Coordination::Error::ZOK) diff --git a/src/Storages/MergeTree/GinIndexDataPartHelper.cpp b/src/Storages/MergeTree/GinIndexDataPartHelper.cpp index a8570a95a0..db52bd2fd6 100644 --- a/src/Storages/MergeTree/GinIndexDataPartHelper.cpp +++ b/src/Storages/MergeTree/GinIndexDataPartHelper.cpp @@ -68,7 +68,7 @@ String GinDataLocalPartHelper::getPartUniqueID() const GinDataCNCHPartHelper::GinDataCNCHPartHelper(const IMergeTreeDataPartPtr& part_, const IDiskCachePtr& cache_, DiskCacheMode mode_): cache(cache_), part_checksums(part_->getChecksums()), - disk(part_->volume->getDisk()), part_rel_path(part_->getFullRelativePath()), mode(mode_), log(&Poco::Logger::get("GinDataCNCHPartHelper")) + disk(part_->volume->getDisk()), part_rel_path(part_->getFullRelativePath()), mode(mode_), log(getLogger("GinDataCNCHPartHelper")) { if (part_->getType() != IMergeTreeDataPart::Type::CNCH) { diff --git a/src/Storages/MergeTree/GinIndexDataPartHelper.h b/src/Storages/MergeTree/GinIndexDataPartHelper.h index 155eb38e5b..991c3926d1 100644 --- a/src/Storages/MergeTree/GinIndexDataPartHelper.h +++ b/src/Storages/MergeTree/GinIndexDataPartHelper.h @@ -1,6 +1,7 @@ #pragma once +#include #include #include #include @@ -77,7 +78,7 @@ private: DiskCacheMode mode; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/GinIndexStore.cpp b/src/Storages/MergeTree/GinIndexStore.cpp index 71e72089e3..1d1f9bca45 100644 --- a/src/Storages/MergeTree/GinIndexStore.cpp +++ b/src/Storages/MergeTree/GinIndexStore.cpp @@ -438,7 +438,7 @@ void GinIndexStore::writeSegment() size_t rows_limit = std::lround(density * current_segment.total_row_size); - LOG_DEBUG(&Poco::Logger::get("gin info"), + LOG_DEBUG(getLogger("gin info"), "density {} total_size {} next_row_id {} limit {} ", density, current_segment.total_row_size, current_segment.next_row_id, rows_limit); @@ -577,7 +577,7 @@ void GinIndexStoreDeserializer::readSegments() { UInt32 num_segments = 
store->getNumOfSegments(); - //LOG_TRACE(&Poco::Logger::get("GinIndexStoreDeserializer"), "Get {} Gin segments ", num_segments); + //LOG_TRACE(getLogger("GinIndexStoreDeserializer"), "Get {} Gin segments ", num_segments); if (num_segments == 0) return; @@ -626,7 +626,7 @@ void GinIndexStoreDeserializer::readSegmentDictionary(UInt32 segment_id) it->second->offsets.getData().resize(fst_size); dict_file_stream->readStrict(reinterpret_cast(it->second->offsets.getData().data()), fst_size); - //LOG_TRACE(&Poco::Logger::get("GinIndexStoreDeserializer"), "Read Gin FST Dict {} size", fst_size); + //LOG_TRACE(getLogger("GinIndexStoreDeserializer"), "Read Gin FST Dict {} size", fst_size); } GinPostingsCachePtr GinIndexStoreDeserializer::createPostingsCacheFromTerms(const std::set & terms) diff --git a/src/Storages/MergeTree/Index/BitmapIndexHelper.cpp b/src/Storages/MergeTree/Index/BitmapIndexHelper.cpp index bc877ff256..f29ce06a5f 100644 --- a/src/Storages/MergeTree/Index/BitmapIndexHelper.cpp +++ b/src/Storages/MergeTree/Index/BitmapIndexHelper.cpp @@ -162,7 +162,7 @@ std::pair BitmapIndexInfo::getIndexColumns(const IMergeTreeDat } catch (...) 
{ - tryLogCurrentException(&Poco::Logger::get("MergeTreeBitmapIndexReader"), __PRETTY_FUNCTION__); + tryLogCurrentException(getLogger("MergeTreeBitmapIndexReader"), __PRETTY_FUNCTION__); return {}; } } diff --git a/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.cpp b/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.cpp index 56a43ceb0f..eb744a65b2 100644 --- a/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.cpp +++ b/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.cpp @@ -12,7 +12,7 @@ namespace DB MergeTreeBitmapIndex::MergeTreeBitmapIndex(MergeTreeData & data_) : data(data_) - , log(&Poco::Logger::get("MergeTreeBitmapIndex")) + , log(getLogger("MergeTreeBitmapIndex")) { } diff --git a/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.h b/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.h index 0db934c954..4149b6f836 100644 --- a/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.h +++ b/src/Storages/MergeTree/Index/MergeTreeBitmapIndex.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -36,7 +37,7 @@ public: private: MergeTreeData & data; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/Index/MergeTreeBitmapIndexReader.cpp b/src/Storages/MergeTree/Index/MergeTreeBitmapIndexReader.cpp index da4cd42bac..b5476e7d82 100644 --- a/src/Storages/MergeTree/Index/MergeTreeBitmapIndexReader.cpp +++ b/src/Storages/MergeTree/Index/MergeTreeBitmapIndexReader.cpp @@ -76,7 +76,7 @@ MergeTreeBitmapIndexReader::MergeTreeBitmapIndexReader } catch(...) 
{ - tryLogCurrentException(&Poco::Logger::get("MergeTreeBitmapIndexReader"), __PRETTY_FUNCTION__); + tryLogCurrentException(getLogger("MergeTreeBitmapIndexReader"), __PRETTY_FUNCTION__); valid_reader = false; break; } diff --git a/src/Storages/MergeTree/Index/MergeTreeIndexHelper.cpp b/src/Storages/MergeTree/Index/MergeTreeIndexHelper.cpp index 497a048b82..5b3b9e3dee 100644 --- a/src/Storages/MergeTree/Index/MergeTreeIndexHelper.cpp +++ b/src/Storages/MergeTree/Index/MergeTreeIndexHelper.cpp @@ -202,7 +202,7 @@ MergeTreeIndexContextPtr MergeTreeIndexContext::buildFromProjection(const Assign auto actions = ProjectionStep::createActions(bitmap_expressions, building_context.input_columns, building_context.context); - LOG_DEBUG(&Poco::Logger::get("buildFromProjection"), + LOG_DEBUG(getLogger("buildFromProjection"), fmt::format("actions: {}, index_context: {}", actions->dumpDAG(), index_context->toString())); index_context->setProjection(actions); @@ -265,7 +265,7 @@ void MergeTreeIndexContext::makeProjectionForMaterializedIndex( //actions->finalize(output_columns); - LOG_DEBUG(&Poco::Logger::get("makeProjectionForMaterializedIndex"), + LOG_DEBUG(getLogger("makeProjectionForMaterializedIndex"), fmt::format("index_output: {}, name_to_name_map: {}, output_columns: {}, actions: {}", index_output, name_to_name_map, output_columns, actions->dumpDAG())); @@ -314,7 +314,7 @@ size_t MergeTreeIndexExecutor::read(size_t from_mark, bool continue_reading, siz if (it->second && it->second->validIndexReader()) { result_rows = it->second->read(from_mark, continue_reading, max_rows_to_read, res); - LOG_TRACE(&Poco::Logger::get("MergeTreeIndexExecutor"), fmt::format("IndexExecutor ({}) read from {} mark, max_rows_to_read {}, result_rows {}", + LOG_TRACE(getLogger("MergeTreeIndexExecutor"), fmt::format("IndexExecutor ({}) read from {} mark, max_rows_to_read {}, result_rows {}", IndexTypeToString(it->first), from_mark, max_rows_to_read, result_rows)); } } diff --git 
a/src/Storages/MergeTree/Index/MergeTreeSegmentBitmapIndex.h b/src/Storages/MergeTree/Index/MergeTreeSegmentBitmapIndex.h index d83824a2df..cdcd538eb1 100644 --- a/src/Storages/MergeTree/Index/MergeTreeSegmentBitmapIndex.h +++ b/src/Storages/MergeTree/Index/MergeTreeSegmentBitmapIndex.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -29,6 +30,6 @@ private: static inline String getBitIdxExtension() { return SEGMENT_BITMAP_IDX_EXTENSION; } MergeTreeData & data; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.cpp b/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.cpp index 9e1355fca9..896ed9dc94 100644 --- a/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.cpp +++ b/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.cpp @@ -16,7 +16,7 @@ TableScanExecutorWithIndex::TableScanExecutorWithIndex(TableScanStep & step, Con , merge_tree_reader(merge_tree_data) , select_query_info(step.getQueryInfo()) , context(std::move(context_)) -, log(&Poco::Logger::get("TableScanExecutorWithIndex")) +, log(getLogger("TableScanExecutorWithIndex")) { input_stream = step.getOutputStream(); query_required_columns = step.getRequiredColumns(); diff --git a/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.h b/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.h index e46a18b579..896cb33521 100644 --- a/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.h +++ b/src/Storages/MergeTree/Index/TableScanExecutorWithIndex.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -36,7 +37,7 @@ private: MergeTreeDataSelectExecutor merge_tree_reader; const SelectQueryInfo & select_query_info; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; DataStream input_stream; Names query_required_columns; diff --git a/src/Storages/MergeTree/LateMaterialize/MergeTreeReverseSelectProcessorLM.h b/src/Storages/MergeTree/LateMaterialize/MergeTreeReverseSelectProcessorLM.h 
index 6076888bf8..6589b69a20 100644 --- a/src/Storages/MergeTree/LateMaterialize/MergeTreeReverseSelectProcessorLM.h +++ b/src/Storages/MergeTree/LateMaterialize/MergeTreeReverseSelectProcessorLM.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -38,7 +39,7 @@ protected: private: bool is_first_task; Chunks chunks; - static inline Poco::Logger * log = &Poco::Logger::get("MergeTreeReverseSelectProcessor"); + static inline LoggerPtr log = getLogger("MergeTreeReverseSelectProcessor"); }; } diff --git a/src/Storages/MergeTree/LateMaterialize/MergeTreeSelectProcessorLM.h b/src/Storages/MergeTree/LateMaterialize/MergeTreeSelectProcessorLM.h index 5823578b6f..f4c32fcba7 100644 --- a/src/Storages/MergeTree/LateMaterialize/MergeTreeSelectProcessorLM.h +++ b/src/Storages/MergeTree/LateMaterialize/MergeTreeSelectProcessorLM.h @@ -1,4 +1,5 @@ #pragma once +#include #include #include #include @@ -60,7 +61,7 @@ protected: bool check_columns; - static inline Poco::Logger * log = &Poco::Logger::get("MergeTreeSelectProcessorLM"); + static inline LoggerPtr log = getLogger("MergeTreeSelectProcessorLM"); }; } diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index a04fb9f657..91dc7268c9 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -107,7 +107,7 @@ MergeTreeBaseSelectProcessor::MergeTreeBaseSelectProcessor( prewhere_actions->need_filter = prewhere_info->need_filter; LOG_TRACE( - &Poco::Logger::get("MergeTreeBaseSelectProcessor"), + getLogger("MergeTreeBaseSelectProcessor"), "Prewhere column = {}, actions = {} ", prewhere_info->prewhere_column_name, prewhere_info->prewhere_actions->dumpDAG()); diff --git a/src/Storages/MergeTree/MergeTreeCNCHDataDumper.cpp b/src/Storages/MergeTree/MergeTreeCNCHDataDumper.cpp index e0e3554d14..c421542f90 100644 --- a/src/Storages/MergeTree/MergeTreeCNCHDataDumper.cpp +++ 
b/src/Storages/MergeTree/MergeTreeCNCHDataDumper.cpp @@ -81,7 +81,7 @@ MergeTreeCNCHDataDumper::MergeTreeCNCHDataDumper( const MergeTreeDataFormatVersion version_) : data(data_) , generator_id(generator_id_) - , log(&Poco::Logger::get(data.getLogName() + "(CNCHDumper)")) + , log(getLogger(data.getLogName() + "(CNCHDumper)")) , magic_code(magic_code_) , version(version_) { @@ -136,7 +136,7 @@ size_t MergeTreeCNCHDataDumper::check( DiskPtr remote_disk = remote_part->volume->getDisk(); String part_data_rel_path = remote_part->getFullRelativePath() + "data"; - LOG_DEBUG(&Poco::Logger::get("MergeTreeCNCHDataDumper::check"), "Checking part {} from {}\n", remote_part->name, part_data_rel_path); + LOG_DEBUG(getLogger("MergeTreeCNCHDataDumper::check"), "Checking part {} from {}\n", remote_part->name, part_data_rel_path); size_t cnch_data_file_size = remote_disk->getFileSize(part_data_rel_path); diff --git a/src/Storages/MergeTree/MergeTreeCNCHDataDumper.h b/src/Storages/MergeTree/MergeTreeCNCHDataDumper.h index 7a4bf64042..dddbf02713 100644 --- a/src/Storages/MergeTree/MergeTreeCNCHDataDumper.h +++ b/src/Storages/MergeTree/MergeTreeCNCHDataDumper.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -128,7 +129,7 @@ private: MergeTreeMetaBase & data; S3ObjectMetadata::PartGeneratorID generator_id; - Poco::Logger * log; + LoggerPtr log; String magic_code{"CNCH"}; MergeTreeDataFormatVersion version{MERGE_TREE_CHCH_DATA_STORAGTE_VERSION}; }; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index e300ddd0d2..cc8b611e40 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -773,10 +774,10 @@ struct CurrentlySubmergingEmergingTagger MergeTreeData & storage; String emerging_part_name; MergeTreeData::DataPartsVector submerging_parts; - Poco::Logger * log; + LoggerPtr log; CurrentlySubmergingEmergingTagger( - 
MergeTreeData & storage_, const String & name_, MergeTreeData::DataPartsVector && parts_, Poco::Logger * log_) + MergeTreeData & storage_, const String & name_, MergeTreeData::DataPartsVector && parts_, LoggerPtr log_) : storage(storage_), emerging_part_name(name_), submerging_parts(std::move(parts_)), log(log_) { } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 6136f1bb68..6e7ff5faf7 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -195,7 +195,7 @@ void FutureMergedMutatedPart::updatePath(const MergeTreeMetaBase & storage, cons MergeTreeDataMergerMutator::MergeTreeDataMergerMutator(MergeTreeMetaBase & data_, size_t background_pool_size_) : data(data_) , background_pool_size(background_pool_size_) - , log(&Poco::Logger::get(data.getLogName() + " (MergerMutator)")) + , log(getLogger(data.getLogName() + " (MergerMutator)")) { } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index ae682b198a..1f55a7ebb9 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -338,7 +339,7 @@ private: MergeTreeMetaBase & data; const size_t background_pool_size; - Poco::Logger * log; + LoggerPtr log; /// When the last time you wrote to the log that the disk space was running out (not to write about this too often). 
time_t disk_space_warning_time = 0; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 4c2abd9497..52e73ec2e3 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -141,16 +141,16 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( const MergeTreeIndexGranularity & index_granularity_, const BitmapBuildInfo & bitmap_build_info_) : MergeTreeDataPartWriterOnDisk( - data_part_, - columns_list_, + data_part_, + columns_list_, metadata_snapshot_, - indices_to_recalc_, + indices_to_recalc_, marks_file_extension_, - default_codec_, - settings_, - index_granularity_, + default_codec_, + settings_, + index_granularity_, bitmap_build_info_) - , log(&Poco::Logger::get(storage.getLogName() + " (WriterCompact)")) + , log(::getLogger(storage.getLogName() + " (WriterCompact)")) { const auto & storage_columns = metadata_snapshot->getColumns(); for (const auto & column : columns_list) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index 2bae1d35cb..b557622ad1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include namespace DB @@ -60,7 +61,7 @@ private: /// and skip indices in their corresponding files. 
void writeDataBlockPrimaryIndexAndSkipIndices(const Block & block, const Granules & granules); - Poco::Logger * getLogger() override { return log; } + LoggerPtr getLogger() override { return log; } Block header; @@ -136,7 +137,7 @@ private: CompactDataWriterPtr data_writer; - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index b920ca9482..5362e0709f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -301,14 +301,14 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() /** * Bitmap indices can be built from : - * 1. build in insert / merge: - * - enable build : - * build_all_bitmap_index && !only_bitmap_index: + * 1. build in insert / merge: + * - enable build : + * build_all_bitmap_index && !only_bitmap_index: * - not enable build (insert: enable_build_ab_index; merge: build_bitmap_index_in_merge): * (!build_all_bitmap_index) with empty bitmap_index_columns * 2. alter table build bitmap of partition: * - build_all_bitmap_index && only_bitmap_index - * 3. other mutation: + * 3. other mutation: * - not_build_bitmap_index * 4. 
build with dependent columns changed: * - (!build_all_bitmap_index && !only_bitmap_index) with dependent columns in bitmap_index_columns @@ -318,7 +318,7 @@ void MergeTreeDataPartWriterOnDisk::initBitmapIndices() if (bitmap_build_info.not_build_bitmap_index) return; - auto get_all_bitmap_columns = [&](const auto & all_columns) + auto get_all_bitmap_columns = [&](const auto & all_columns) { bitmap_build_info.bitmap_index_columns.clear(); for (const auto & column : all_columns) @@ -336,7 +336,7 @@ void MergeTreeDataPartWriterOnDisk::initBitmapIndices() else get_all_bitmap_columns(columns_list); } - + for (const auto & it : bitmap_build_info.bitmap_index_columns) { if (MergeTreeBitmapIndex::isBitmapIndexColumn(it.type) && MergeTreeBitmapIndex::needBuildIndex(data_part->getFullPath(), it.name)) @@ -356,7 +356,7 @@ void MergeTreeDataPartWriterOnDisk::initSegmentBitmapIndices() if (bitmap_build_info.not_build_segment_bitmap_index) return; - auto get_all_indexed_columns = [&](const auto & all_columns) + auto get_all_indexed_columns = [&](const auto & all_columns) { bitmap_build_info.segment_bitmap_index_columns.clear(); for (const auto & column : all_columns) @@ -374,7 +374,7 @@ void MergeTreeDataPartWriterOnDisk::initSegmentBitmapIndices() else get_all_indexed_columns(columns_list); } - + for (const auto & it : bitmap_build_info.segment_bitmap_index_columns) { if (MergeTreeSegmentBitmapIndex::isSegmentBitmapIndexColumn(it.type) && MergeTreeSegmentBitmapIndex::needBuildSegmentIndex(data_part->getFullPath(), it.name)) @@ -572,7 +572,7 @@ void MergeTreeDataPartWriterOnDisk::finishSkipIndicesSerialization( store.second->finalize(); store.second->addToChecksums(checksums); } - + for (auto & stream : skip_indices_streams) { stream->finalize(); @@ -1107,8 +1107,8 @@ void MergeTreeDataPartWriterOnDisk::writeColumn( if (write_final_mark) writeFinalMark(name_and_type, offset_columns, serialize_settings.path); - - + + serializations[name]->enumerateStreams(finalizeStreams(name), 
serialize_settings.path); } @@ -1376,7 +1376,7 @@ void MergeTreeDataPartWriterOnDisk::loadColumnCompressInfoFromSetting() catch (...) { column_compress_settings.clear(); - LOG_ERROR(&Poco::Logger::get("MergeTreeDataPartWriterOnDisk"), + LOG_ERROR(getLogger(), "Failed to parse column compress settings from {}", settings); } } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index d3d7cf6bf5..72d65a7571 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -270,7 +271,7 @@ protected: WrittenOffsetColumns & offset_columns, ISerialization::SubstreamPath & path); - virtual Poco::Logger * getLogger() = 0; + virtual LoggerPtr getLogger() = 0; const MergeTreeIndices skip_indices; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 497fed7a1d..96c16942b4 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -123,7 +123,7 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( settings_, index_granularity_, bitmap_build_info_) - , log(&Poco::Logger::get(storage.getLogName() + " (WriterWide)")) + , log(::getLogger(storage.getLogName() + " (WriterWide)")) { const auto & columns = metadata_snapshot->getColumns(); for (const auto & it : columns_list) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index 3eefbb6cee..a014d28a11 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include #include @@ -88,13 +89,13 @@ private: size_t getRowsWrittenInLastMark() override { return rows_written_in_last_mark; } - 
Poco::Logger * getLogger() override { return log; } + LoggerPtr getLogger() override { return log; } /// How many rows we have already written in the current mark. /// More than zero when incoming blocks are smaller then their granularity. size_t rows_written_in_last_mark = 0; - Poco::Logger * log; + LoggerPtr log; /** ------------------ Unique Table Only --------------------- **/ void writeUniqueKeyIndex(Block & unique_key_block); diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 07587426b8..0247ba0304 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -111,7 +111,7 @@ namespace ErrorCodes MergeTreeDataSelectExecutor::MergeTreeDataSelectExecutor(const MergeTreeMetaBase & data_) - : data(data_), log(&Poco::Logger::get(data.getLogName() + " (SelectExecutor)")) + : data(data_), log(getLogger(data.getLogName() + " (SelectExecutor)")) { } @@ -120,7 +120,7 @@ size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead( const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log) + LoggerPtr log) { size_t rows_count = 0; @@ -457,7 +457,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( const StorageMetadataPtr & metadata_snapshot, ContextPtr context, bool sample_factor_column_queried, - Poco::Logger * log) + LoggerPtr log) { const Settings & settings = context->getSettingsRef(); /// Sampling. 
@@ -747,7 +747,7 @@ void MergeTreeDataSelectExecutor::filterPartsByPartition( const SelectQueryInfo & query_info, const ContextPtr & context, const PartitionIdToMaxBlock * max_block_numbers_to_read, - Poco::Logger * log, + LoggerPtr log, ReadFromMergeTree::IndexStats & index_stats) { const Settings & settings = context->getSettingsRef(); @@ -867,7 +867,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd const ContextPtr & context, const KeyCondition & key_condition, const MergeTreeReaderSettings & reader_settings, - Poco::Logger * log_, + LoggerPtr log_, size_t num_streams, ReadFromMergeTree::IndexStats & index_stats, DelayedSkipIndex & delayed_indices_, @@ -908,7 +908,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd { for (const auto & index : metadata_snapshot->getSecondaryIndices()) { - LOG_TRACE(&Poco::Logger::get("filterPartsByPrimaryKeyAndSkipIndexes"),"Creating index {} {}\n", index.name, index.type); + LOG_TRACE(getLogger("filterPartsByPrimaryKeyAndSkipIndexes"),"Creating index {} {}\n", index.name, index.type); auto index_helper = MergeTreeIndexFactory::instance().get(index); if (!settings.enable_inverted_index && index_helper->isInvertedIndex()) { @@ -1169,7 +1169,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByIntermediateResultCa const StorageID & storage_id, const SelectQueryInfo & query_info, const ContextPtr & context, - Poco::Logger * /*log*/, + LoggerPtr /*log*/, RangesInDataParts & parts_with_ranges, CacheHolderPtr & part_cache_holder) { @@ -1682,7 +1682,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log) + LoggerPtr log) { MarkRanges res; @@ -1908,7 +1908,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( size_t & total_granules, size_t & granules_dropped, roaring::Roaring * filter_bitmap, - 
Poco::Logger * log, + LoggerPtr log, IndexTimeWatcher & index_time_watcher) { const auto & settings = context->getSettingsRef(); @@ -2125,7 +2125,7 @@ void MergeTreeDataSelectExecutor::selectPartsToReadWithUUIDFilter( const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context, PartFilterCounters & counters, - Poco::Logger * log) + LoggerPtr log) { const Settings & settings = query_context->getSettings(); diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index e2aa8e4be0..0b441de3f5 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -88,7 +89,7 @@ public: private: const MergeTreeMetaBase & data; - Poco::Logger * log; + LoggerPtr log; /// Get the approximate value (bottom estimate - only by full marks) of the number of rows falling under the index. static size_t getApproximateTotalRowsToRead( @@ -96,14 +97,14 @@ private: const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log); + LoggerPtr log); static MarkRanges markRangesFromPKRange( const MergeTreeMetaBase::DataPartPtr & part, const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log); + LoggerPtr log); /// If filter_bitmap is nullptr, then we won't trying to generate read filter static MarkRanges filterMarksUsingIndex( @@ -116,7 +117,7 @@ private: size_t & total_granules, size_t & granules_dropped, roaring::Roaring * filter_bitmap, - Poco::Logger * log, + LoggerPtr log, IndexTimeWatcher & index_time_watcher); struct PartFilterCounters @@ -151,7 +152,7 @@ private: const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context, PartFilterCounters & counters, - Poco::Logger * log); + LoggerPtr log); public: /// For 
given number rows and bytes, get the number of marks to read. @@ -188,7 +189,7 @@ public: const SelectQueryInfo & query_info, const ContextPtr & context, const PartitionIdToMaxBlock * max_block_numbers_to_read, - Poco::Logger * log, + LoggerPtr log, ReadFromMergeTree::IndexStats & index_stats); static DataTypes get_set_element_types(const NamesAndTypesList & source_columns, const String & column_name); @@ -213,7 +214,7 @@ public: const ContextPtr & context, const KeyCondition & key_condition, const MergeTreeReaderSettings & reader_settings, - Poco::Logger * log, + LoggerPtr log, size_t num_streams, ReadFromMergeTree::IndexStats & index_stats, DelayedSkipIndex & delayed_indices_, @@ -226,7 +227,7 @@ public: const StorageID & storage_id, const SelectQueryInfo & query_info, const ContextPtr & context, - Poco::Logger * log, + LoggerPtr log, RangesInDataParts & parts_with_ranges, CacheHolderPtr & part_cache_holder); @@ -242,7 +243,7 @@ public: const StorageMetadataPtr & metadata_snapshot, ContextPtr context, bool sample_factor_column_queried, - Poco::Logger * log); + LoggerPtr log); static MarkRanges sampleByRange( const MergeTreeMetaBase::DataPartPtr & part, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 45d52c290f..6d08dc7503 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -304,7 +304,7 @@ Block MergeTreeDataWriter::mergeBlock(const Block & block, SortDescription sort_ case MergeTreeMetaBase::MergingParams::Collapsing: return std::make_shared( block, 1, sort_description, data.merging_params.sign_column, - false, block_size + 1, &Poco::Logger::get("MergeTreeBlockOutputStream")); + false, block_size + 1, getLogger("MergeTreeBlockOutputStream")); case MergeTreeMetaBase::MergingParams::Summing: return std::make_shared( block, 1, sort_description, data.merging_params.columns_to_sum, @@ -892,7 +892,7 @@ MergeTreeMetaBase::MutableDataPartPtr 
MergeTreeDataWriter::writeTempPartialUpdat MergeTreeMetaBase::MutableDataPartPtr MergeTreeDataWriter::writeProjectionPartImpl( MergeTreeMetaBase & data, - Poco::Logger * log, + LoggerPtr log, Block block, const StorageMetadataPtr & metadata_snapshot, MergeTreeMetaBase::MutableDataPartPtr && new_data_part) @@ -990,7 +990,7 @@ MergeTreeDataWriter::writeProjectionPart(Block block, const ProjectionDescriptio MergeTreeMetaBase::MutableDataPartPtr MergeTreeDataWriter::writeTempProjectionPart( MergeTreeMetaBase & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection, const IMergeTreeDataPart * parent_part, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h index 367832ed16..715641ad8d 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.h +++ b/src/Storages/MergeTree/MergeTreeDataWriter.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -65,7 +66,7 @@ public: // use dest_policy_ to reserve space if specificed explicit MergeTreeDataWriter(MergeTreeMetaBase & data_, IStorage::StorageLocation location = IStorage::StorageLocation::MAIN): data(data_), write_location(location), - log(&Poco::Logger::get(data.getLogName() + " (Writer)")) {} + log(getLogger(data.getLogName() + " (Writer)")) {} /** Split the block to blocks, each of them must be written as separate part. 
* (split rows by partition) @@ -99,7 +100,7 @@ public: static MergeTreeMetaBase::MutableDataPartPtr writeTempProjectionPart( MergeTreeMetaBase & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection, const IMergeTreeDataPart * parent_part, @@ -111,7 +112,7 @@ public: private: static MergeTreeMetaBase::MutableDataPartPtr writeProjectionPartImpl( MergeTreeMetaBase & data, - Poco::Logger * log, + LoggerPtr log, Block block, const StorageMetadataPtr & metadata_snapshot, MergeTreeMetaBase::MutableDataPartPtr && new_data_part); @@ -120,7 +121,7 @@ private: IStorage::StorageLocation write_location; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/MergeTreeFillDeleteWithDefaultValueSource.h b/src/Storages/MergeTree/MergeTreeFillDeleteWithDefaultValueSource.h index 2b5b6f93b6..e64968eb8d 100644 --- a/src/Storages/MergeTree/MergeTreeFillDeleteWithDefaultValueSource.h +++ b/src/Storages/MergeTree/MergeTreeFillDeleteWithDefaultValueSource.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -55,7 +56,7 @@ private: /// Columns we have to read (each Block from read will contain them) Names columns_to_read; - Poco::Logger * log = &Poco::Logger::get("MergeTreeFillDeleteWithDefaultValueSource"); + LoggerPtr log = getLogger("MergeTreeFillDeleteWithDefaultValueSource"); std::shared_ptr mark_cache; using MergeTreeReaderPtr = std::unique_ptr; diff --git a/src/Storages/MergeTree/MergeTreeIndexInverted.cpp b/src/Storages/MergeTree/MergeTreeIndexInverted.cpp index 9a1e70e921..f9bb41bce9 100644 --- a/src/Storages/MergeTree/MergeTreeIndexInverted.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexInverted.cpp @@ -70,7 +70,7 @@ bool MergeTreeConditionInverted::createFunctionTextSearchCondition( TextSearchQuery tsquery(value); - LOG_TRACE(&Poco::Logger::get(__func__), tsquery.toString()); + LOG_TRACE(getLogger(__func__), tsquery.toString()); out.text_search_filter = 
tsquery.toTextSearchQueryExpression(params, token_extractor, nlp_extractor); @@ -691,7 +691,7 @@ bool MergeTreeConditionInverted::atomFromAST(const ASTPtr & node, Block & block_ token_extractor); if (sep_tokenizer == nullptr || !getKey(args[0], key_column_num)) { - LOG_TRACE(&Poco::Logger::get("MergeTreeConditionInverted"), + LOG_TRACE(getLogger("MergeTreeConditionInverted"), "Inverted evaluate unknown since query column didn't match index"); return false; } @@ -701,7 +701,7 @@ bool MergeTreeConditionInverted::atomFromAST(const ASTPtr & node, Block & block_ if (!KeyCondition::getConstant(args[1], block_with_constants, const_value, const_type) || !KeyCondition::getConstant(args[2], block_with_constants, seperator_const_value, seperator_const_type)) { - LOG_TRACE(&Poco::Logger::get("MergeTreeConditionInverted"), + LOG_TRACE(getLogger("MergeTreeConditionInverted"), "Inverted evaluate unknown since didn't find needle and seperator const"); return false; } @@ -709,7 +709,7 @@ bool MergeTreeConditionInverted::atomFromAST(const ASTPtr & node, Block & block_ std::unordered_set seperator_set(seperators_str.begin(), seperators_str.end()); if (seperator_set != sep_tokenizer->seperators()) { - LOG_TRACE(&Poco::Logger::get("MergeTreeConditionInverted"), + LOG_TRACE(getLogger("MergeTreeConditionInverted"), "Inverted evaluate unknown since tokenizer seperators mismatch, " "query {}, tokenizer {}", seperators_str, String(sep_tokenizer->seperators().begin(), sep_tokenizer->seperators().end())); @@ -826,7 +826,7 @@ bool MergeTreeConditionInverted::atomFromAST(const ASTPtr & node, Block & block_ ChineseTokenExtractor::stringToGinFilter(value, nlp_extractor, *out.gin_filter); } - LOG_TRACE(&Poco::Logger::get("inverted index"),"search string: {} with token : [ {} ] ", value, out.gin_filter->getTermsInString()); + LOG_TRACE(getLogger("inverted index"),"search string: {} with token : [ {} ] ", value, out.gin_filter->getTermsInString()); return true; diff --git 
a/src/Storages/MergeTree/MergeTreeIndexReader.cpp b/src/Storages/MergeTree/MergeTreeIndexReader.cpp index afe385f88c..044ecc8d19 100644 --- a/src/Storages/MergeTree/MergeTreeIndexReader.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexReader.cpp @@ -150,7 +150,7 @@ MergeTreeIndexReader::MergeTreeIndexReader( break; } default: - LOG_DEBUG(&Poco::Logger::get("MergeTreeIndexReader"), "Storage type: {} doesn't support secondary indexes", part_->info.storage_type); + LOG_DEBUG(getLogger("MergeTreeIndexReader"), "Storage type: {} doesn't support secondary indexes", part_->info.storage_type); break; } if(stream) stream->seekToStart(); diff --git a/src/Storages/MergeTree/MergeTreeMarksLoader.cpp b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp index a7523e092e..22b241c129 100644 --- a/src/Storages/MergeTree/MergeTreeMarksLoader.cpp +++ b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp @@ -156,7 +156,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl() parsed_disk_cache_host = parseAddress(part_host.disk_cache_host_port, 0).first; LOG_TRACE( - &Poco::Logger::get(__func__), + getLogger(__func__), "Current node host vs disk cache host: {} vs {}", parsed_assign_compute_host.has_value() ? removeBracketsIfIpv6(parsed_assign_compute_host.value()) : "", parsed_disk_cache_host.has_value() ? 
removeBracketsIfIpv6(parsed_disk_cache_host.value()) : ""); @@ -171,7 +171,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl() if (local_cache_disk && local_cache_disk->exists(local_cache_path) && settings.read_settings.disk_cache_mode != DiskCacheMode::FORCE_STEAL_DISK_CACHE) { from_disk_cache = true; - LOG_TRACE(&Poco::Logger::get(__func__), "load from local disk cache {}, mrk_path {}", local_cache_disk->getPath(), local_cache_path); + LOG_TRACE(getLogger(__func__), "load from local disk cache {}, mrk_path {}", local_cache_disk->getPath(), local_cache_path); size_t cached_mark_file_size = local_cache_disk->getFileSize(local_cache_path); if (expected_file_size != cached_mark_file_size) throw Exception( @@ -200,7 +200,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl() auto remote_cache_file = std::make_unique(remote_data_client, mark_file_size); if (remote_cache_file->getFileName().empty()) { - LOG_TRACE(&Poco::Logger::get(__func__), "load from remote filesystem mrk_path {} since remote disk cache is empty", mrk_path); + LOG_TRACE(getLogger(__func__), "load from remote filesystem mrk_path {} since remote disk cache is empty", mrk_path); auto buf = disk->readFile(mrk_path, load_mark_read_settings); if (buf->seek(mark_file_offset) != mark_file_offset) throw Exception( @@ -210,7 +210,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl() } LOG_TRACE( - &Poco::Logger::get(__func__), + getLogger(__func__), "load from remote disk cache mrk_path {}/{}, size = {}", part_host.disk_cache_host_port, remote_cache_file->getFileName(), remote_cache_file->getFileSize()); @@ -230,7 +230,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl() } } - LOG_TRACE(&Poco::Logger::get(__func__), "load from remote filesystem mrk_path {}", mrk_path); + LOG_TRACE(getLogger(__func__), "load from remote filesystem mrk_path {}", mrk_path); auto buf = disk->readFile(mrk_path, load_mark_read_settings); if (buf->seek(mark_file_offset) != mark_file_offset) 
throw Exception("Cannot seek to mark file " + mrk_path + " for stream " + stream_name, ErrorCodes::CANNOT_SEEK_THROUGH_FILE); diff --git a/src/Storages/MergeTree/MergeTreeMeta.cpp b/src/Storages/MergeTree/MergeTreeMeta.cpp index 6c8249252a..b8bcc13fca 100644 --- a/src/Storages/MergeTree/MergeTreeMeta.cpp +++ b/src/Storages/MergeTree/MergeTreeMeta.cpp @@ -23,7 +23,7 @@ namespace DB MergeTreeMeta::MergeTreeMeta(const String _path, const String metastore_name_) : path(_path + "catalog.db") , metastore_name(metastore_name_) - , log(&Poco::Logger::get(metastore_name + "(MetaStore)")) + , log(getLogger(metastore_name + "(MetaStore)")) { openMetastore(); } diff --git a/src/Storages/MergeTree/MergeTreeMeta.h b/src/Storages/MergeTree/MergeTreeMeta.h index 54931c1bdc..ed7ddf4b3c 100644 --- a/src/Storages/MergeTree/MergeTreeMeta.h +++ b/src/Storages/MergeTree/MergeTreeMeta.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -90,7 +91,7 @@ private: String path; String metastore_name; - Poco::Logger * log; + LoggerPtr log; MetaStorePtr metastore; std::atomic_bool closed {false}; std::mutex meta_mutex; diff --git a/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h index a1afadec7f..e511a3c086 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.h +++ b/src/Storages/MergeTree/MergeTreePartsMover.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -38,7 +39,7 @@ private: public: MergeTreePartsMover(MergeTreeData * data_) : data(data_) - , log(&Poco::Logger::get("MergeTreePartsMover")) + , log(getLogger("MergeTreePartsMover")) { } @@ -66,7 +67,7 @@ public: private: MergeTreeData * data; - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h index 2e8274d0be..b032b0e5ab 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/src/Storages/MergeTree/MergeTreeReadPool.h @@ -21,6 +21,7 @@ #pragma once +#include 
#include #include #include @@ -178,7 +179,7 @@ private: mutable std::mutex mutex; - Poco::Logger * log = &Poco::Logger::get("MergeTreeReadPool"); + LoggerPtr log = getLogger("MergeTreeReadPool"); }; using MergeTreeReadPoolPtr = std::shared_ptr; diff --git a/src/Storages/MergeTree/MergeTreeReaderCNCH.cpp b/src/Storages/MergeTree/MergeTreeReaderCNCH.cpp index 71506950b7..c5edbb2602 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCNCH.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCNCH.cpp @@ -91,7 +91,7 @@ MergeTreeReaderCNCH::MergeTreeReaderCNCH( mark_cache_, mark_ranges_, settings_, avg_value_size_hints_, index_executor_) , segment_cache_strategy(nullptr) , segment_cache(nullptr) - , log(&Poco::Logger::get("MergeTreeReaderCNCH(" + data_part_->get_name() + ")")) + , log(getLogger("MergeTreeReaderCNCH(" + data_part_->get_name() + ")")) , reader_id(UUIDHelpers::UUIDToString(UUIDHelpers::generateV4())) { @@ -554,7 +554,7 @@ size_t MergeTreeReaderCNCH::readIndexColumns(size_t from_mark, bool continue_rea String output_names; for (const auto & output_name: getBitmapOutputColumns()) output_names += " " + output_name; - LOG_TRACE(&Poco::Logger::get("index_executor"), "read bitmap index file:{} for part:{}", output_names, this->data_part->name); + LOG_TRACE(getLogger("index_executor"), "read bitmap index file:{} for part:{}", output_names, this->data_part->name); #endif return bitmap_rows_read; diff --git a/src/Storages/MergeTree/MergeTreeReaderCNCH.h b/src/Storages/MergeTree/MergeTreeReaderCNCH.h index 6955c0937a..aabe8ea933 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCNCH.h +++ b/src/Storages/MergeTree/MergeTreeReaderCNCH.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -82,7 +83,7 @@ private: IDiskCacheStrategyPtr segment_cache_strategy; IDiskCachePtr segment_cache; - Poco::Logger * log; + LoggerPtr log; String reader_id; }; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h 
b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index a2c02a72cd..4919efc764 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include #include #include @@ -86,7 +87,7 @@ protected: bool check_columns; bool is_first_task = true; - Poco::Logger * log = &Poco::Logger::get("MergeTreeSelectProcessor"); + LoggerPtr log = getLogger("MergeTreeSelectProcessor"); }; } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 86d84d2208..19521abecb 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -44,7 +44,7 @@ MergeTreeSequentialSource::RuntimeContext::~RuntimeContext() size_t avg_read_rows = update_count_ > 0 ? (total_rows_ / update_count_) : 0; size_t bytes_per_row = total_rows_ > 0 ? (total_bytes_ / total_rows_) : 0; - LOG_TRACE(&Poco::Logger::get("MergeTreeSequentialSource::RuntimeContext"), + LOG_TRACE(getLogger("MergeTreeSequentialSource::RuntimeContext"), "Total rows {}, total bytes {}, read count {}, average read rows {}, bytes per row {}", total_rows_, ReadableSize(total_bytes_), update_count_, avg_read_rows, ReadableSize(bytes_per_row)); } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.h b/src/Storages/MergeTree/MergeTreeSequentialSource.h index 86320dd5f9..c1a7833f3f 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.h +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.h @@ -20,8 +20,10 @@ */ #pragma once -#include +#include +#include #include +#include #include #include #include @@ -101,7 +103,7 @@ private: /// Should read using direct IO bool read_with_direct_io; - Poco::Logger * log = &Poco::Logger::get("MergeTreeSequentialSource"); + LoggerPtr log = getLogger("MergeTreeSequentialSource"); std::shared_ptr mark_cache; using MergeTreeReaderPtr = std::unique_ptr; diff 
--git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index b87ee4bc4d..fb04e60f21 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -38,7 +38,7 @@ void MergeTreeSettings::loadFromConfig(const String & config_elem, const Poco::U e.addMessage("in MergeTree config"); if (skip_unknown_settings) - LOG_ERROR(&Poco::Logger::get("MergeTreeSettings"), "Unknown setting in {} config", key); + LOG_ERROR(getLogger("MergeTreeSettings"), "Unknown setting in {} config", key); else throw; } @@ -61,7 +61,7 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def, bool attach) e.addMessage("for storage " + storage_def.engine->name); if (attach) - LOG_ERROR(&Poco::Logger::get("MergeTreeSettings"), + LOG_ERROR(getLogger("MergeTreeSettings"), "Unknown setting for storage {}", storage_def.engine->name); else throw; diff --git a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp index a5bfae2c74..eada9af97d 100644 --- a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp @@ -45,7 +45,7 @@ MergeTreeThreadSelectBlockInputProcessor::MergeTreeThreadSelectBlockInputProcess pool_->getHeader(), storage_, storage_snapshot_, query_info_, stream_settings_, virt_column_names_}, thread{thread_}, pool{pool_}, - log(&Poco::Logger::get("MergeTreeThreadSelectBlockInputProcessor")) + log(getLogger("MergeTreeThreadSelectBlockInputProcessor")) { /// round min_marks_to_read up to nearest multiple of block_size expressed in marks /// If granularity is adaptive it doesn't make sense diff --git a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h index 39c8fc7e15..a3e1fe4621 100644 --- 
a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h +++ b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h @@ -20,6 +20,7 @@ */ #pragma once +#include #include @@ -64,7 +65,7 @@ private: /// Names from header. Used in order to order columns in read blocks. Names ordered_names; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 57350af341..f47a97f7b5 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -109,7 +109,7 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer( std::unordered_map column_sizes_, const StorageMetadataPtr & metadata_snapshot_, const Names & queried_columns_, - Poco::Logger * log_, + LoggerPtr log_, MaterializeStrategy materialize_strategy_) : table_columns{collections::map( metadata_snapshot_->getColumns().getAllPhysical(), [](const NameAndTypePair & col) { return col.name; })} @@ -727,6 +727,8 @@ bool MergeTreeWhereOptimizer::cannotBeMoved(const ASTPtr & ptr, bool is_final) c { if (const auto * function_ptr = ptr->as()) { + LOG_DEBUG(getLogger("MergeTreeWhereOptimizer"), "[cannotBeMoved]: function: {} tree: {}", + function_ptr->name, function_ptr->dumpTree()); /// disallow arrayJoin expressions to be moved to PREWHERE for now if ("arrayJoin" == function_ptr->name) return true; @@ -978,7 +980,7 @@ void optimizePartitionPredicate(ASTPtr & query, StoragePtr storage, SelectQueryI } if (query_info.partition_filter) { - LOG_TRACE(&Poco::Logger::get("optimizePartitionPredicate"), "Optimize partition prediate push down query rewrited to {} , partiton filter-{} ", + LOG_TRACE(getLogger("optimizePartitionPredicate"), "Optimize partition prediate push down query rewrited to {} , partiton filter-{} ", queryToString(query), queryToString(query_info.partition_filter)); } } diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h 
b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h index 44b07bdfa5..f837c8c035 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -73,7 +74,7 @@ public: std::unordered_map column_sizes_, const StorageMetadataPtr & metadata_snapshot_, const Names & queried_columns_, - Poco::Logger * log_, + LoggerPtr log_, MaterializeStrategy materialize_strategy_ = MaterializeStrategy::PREWHERE); std::vector && getAtomicPredicatesExpressions(); @@ -168,7 +169,7 @@ private: const Names queried_columns; const NameSet sorting_key_names; const Block block_with_constants; - Poco::Logger * log; + LoggerPtr log; std::unordered_map column_sizes; UInt64 total_size_of_queried_columns = 0; NameSet array_joined_names; diff --git a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp index c48f240914..40e2e4bb45 100644 --- a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp +++ b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp @@ -200,7 +200,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore(const Stor || e.code() == ErrorCodes::BAD_DATA_PART_NAME || e.code() == ErrorCodes::CORRUPTED_DATA) { - LOG_WARNING(&Poco::Logger::get(storage.getLogName() + " (WriteAheadLog)"), + LOG_WARNING(getLogger(storage.getLogName() + " (WriteAheadLog)"), "WAL file '{}' is broken. {}", path, e.displayText()); /// If file is broken, do not write new parts to it. 
diff --git a/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.cpp b/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.cpp index 5377f4701e..96ce16145e 100644 --- a/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.cpp +++ b/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.cpp @@ -149,7 +149,7 @@ MergedReadBufferWithSegmentCache::MergedReadBufferWithSegmentCache( total_segment_count(total_segment_count_), marks_loader(marks_loader_), current_segment_idx(0), current_compressed_offset(std::nullopt), part_host(part_host_), stream_extension(stream_extension_), - logger(&Poco::Logger::get("MergedReadBufferWithSegmentCache")) + logger(getLogger("MergedReadBufferWithSegmentCache")) { initialize(); } diff --git a/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.h b/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.h index 62e7e032d5..3814194569 100644 --- a/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.h +++ b/src/Storages/MergeTree/MergedReadBufferWithSegmentCache.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -170,7 +171,7 @@ private: String stream_extension; - Poco::Logger* logger; + LoggerPtr logger; off_t read_until_position = 0; }; diff --git a/src/Storages/MergeTree/MetastoreRocksDBImpl.cpp b/src/Storages/MergeTree/MetastoreRocksDBImpl.cpp index 92767d2ce5..9257a804bf 100644 --- a/src/Storages/MergeTree/MetastoreRocksDBImpl.cpp +++ b/src/Storages/MergeTree/MetastoreRocksDBImpl.cpp @@ -45,7 +45,7 @@ void MetastoreRocksDBImpl::MultiWrite::commit() ///MetastoreRocksDBImpl definitions MetastoreRocksDBImpl::MetastoreRocksDBImpl(const String & db_path_) : IMetaStore(db_path_) - , log(&Poco::Logger::get("MetastoreRocksDBImpl")) + , log(getLogger("MetastoreRocksDBImpl")) { init(); } diff --git a/src/Storages/MergeTree/MetastoreRocksDBImpl.h b/src/Storages/MergeTree/MetastoreRocksDBImpl.h index 0ede920af1..3fa204b611 100644 --- a/src/Storages/MergeTree/MetastoreRocksDBImpl.h +++ 
b/src/Storages/MergeTree/MetastoreRocksDBImpl.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -86,7 +87,7 @@ private: std::atomic_bool db_closed = true; rocksdb::DB* db = nullptr; - Poco::Logger * log; + LoggerPtr log; public: MetastoreRocksDBImpl(const String & db_path_); diff --git a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp index 316ca916de..4f5598601b 100644 --- a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp +++ b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp @@ -33,7 +33,7 @@ PartMovesBetweenShardsOrchestrator::PartMovesBetweenShardsOrchestrator(StorageRe : storage(storage_) , zookeeper_path(storage.zookeeper_path) , logger_name(storage.getStorageID().getFullTableName() + " (PartMovesBetweenShardsOrchestrator)") - , log(&Poco::Logger::get(logger_name)) + , log(getLogger(logger_name)) , entries_znode_path(zookeeper_path + "/part_moves_shard") { /// Schedule pool is not designed for long-running tasks. TODO replace with a separate thread? 
diff --git a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h index 9e54ae8a8e..04289f0a2e 100644 --- a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h +++ b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -147,7 +148,7 @@ private: String zookeeper_path; String logger_name; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; std::atomic need_stop{false}; BackgroundSchedulePool::TaskHolder task; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index d88fe8813c..0010524ec8 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -72,7 +72,7 @@ ReplicatedMergeTreeBlockOutputStream::ReplicatedMergeTreeBlockOutputStream( , is_attach(is_attach_) , quorum_parallel(quorum_parallel_) , deduplicate(deduplicate_) - , log(&Poco::Logger::get(storage.getLogName() + " (Replicated OutputStream)")) + , log(getLogger(storage.getLogName() + " (Replicated OutputStream)")) , context(context_) { /// The quorum value `1` has the same meaning as if it is disabled. 
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h index a3fce65a84..e06a6de43c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h @@ -1,12 +1,11 @@ #pragma once +#include #include #include #include -namespace Poco { class Logger; } - namespace zkutil { class ZooKeeper; @@ -80,8 +79,7 @@ private: bool deduplicate = true; bool last_block_is_duplicate = false; - using Logger = Poco::Logger; - Poco::Logger * log; + LoggerPtr log; ContextPtr context; }; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 10e2d77eb2..2bf3781544 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes ReplicatedMergeTreeCleanupThread::ReplicatedMergeTreeCleanupThread(StorageReplicatedMergeTree & storage_) : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeCleanupThread)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) { task = storage.getContext()->getSchedulePool().createTask(log_name, [this]{ run(); }); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h index 939a40db8c..15273478f7 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -35,7 +36,7 @@ public: private: StorageReplicatedMergeTree & storage; String log_name; - Poco::Logger * log; + LoggerPtr log; BackgroundSchedulePool::TaskHolder task; pcg64 rng{randomSeed()}; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp 
b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 25365c269b..379df11dcb 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -48,7 +48,7 @@ static const auto PART_CHECK_ERROR_SLEEP_MS = 5 * 1000; ReplicatedMergeTreePartCheckThread::ReplicatedMergeTreePartCheckThread(StorageReplicatedMergeTree & storage_) : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreePartCheckThread)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) { task = storage.getContext()->getSchedulePool().createTask(log_name, [this] { run(); }); task->schedule(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h index 8257898fe3..3f1413d9bc 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -97,7 +98,7 @@ private: StorageReplicatedMergeTree & storage; String log_name; - Poco::Logger * log; + LoggerPtr log; using StringSet = std::set; using PartToCheck = std::pair; /// The name of the part and the minimum time to check (or zero, if not important). 
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 12e2a9429f..32602482ff 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -53,7 +53,7 @@ ReplicatedMergeTreeQueue::ReplicatedMergeTreeQueue(StorageReplicatedMergeTree & zookeeper_path = storage.zookeeper_path; replica_path = storage.replica_path; logger_name = storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeQueue)"; - log = &Poco::Logger::get(logger_name); + log = getLogger(logger_name); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index b3da5225fb..cf50206ac0 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -65,7 +66,7 @@ private: String zookeeper_path; String replica_path; String logger_name; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; /// Protects the queue, future_parts and other queue state variables. 
mutable std::mutex state_mutex; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 29946ce363..d710eb9775 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -42,7 +42,7 @@ static String generateActiveNodeIdentifier() ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_) : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeRestartingThread)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) , active_node_identifier(generateActiveNodeIdentifier()) { const auto storage_settings = storage.getSettings(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index cb10d62834..1918e18953 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -33,7 +34,7 @@ public: private: StorageReplicatedMergeTree & storage; String log_name; - Poco::Logger * log; + LoggerPtr log; std::atomic need_stop {false}; // We need it besides `storage.is_readonly`, because `shutdown()` may be called many times, that way `storage.is_readonly` will not change. 
diff --git a/src/Storages/MergeTree/S3PartsAttachMeta.cpp b/src/Storages/MergeTree/S3PartsAttachMeta.cpp index 328f4a6cd1..76a1726744 100644 --- a/src/Storages/MergeTree/S3PartsAttachMeta.cpp +++ b/src/Storages/MergeTree/S3PartsAttachMeta.cpp @@ -36,7 +36,7 @@ S3PartsLazyCleaner::S3PartsLazyCleaner( const std::optional & generator_id_, size_t max_threads_, size_t batch_clean_size_) - : logger(&Poco::Logger::get("S3PartsLazyCleaner")) + : logger(getLogger("S3PartsLazyCleaner")) , data_key_prefix(data_key_prefix_) , generator_id(generator_id_) , lazy_cleaner(nullptr) diff --git a/src/Storages/MergeTree/S3PartsAttachMeta.h b/src/Storages/MergeTree/S3PartsAttachMeta.h index 7ed11df656..2af30bc2c0 100644 --- a/src/Storages/MergeTree/S3PartsAttachMeta.h +++ b/src/Storages/MergeTree/S3PartsAttachMeta.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -40,7 +41,7 @@ public: private: bool filterPartToRemove(const S3::S3Util & s3_util_, const String & key_); - Poco::Logger * logger; + LoggerPtr logger; const String data_key_prefix; std::optional generator_id; diff --git a/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp index a60c464286..f54b898ff7 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/SimpleMergeSelector.cpp @@ -295,7 +295,7 @@ SimpleMergeSelector::selectMulti(const PartsRanges & partitions, size_t max_tota res.push_back(current_parts); } - LOG_DEBUG(&Poco::Logger::get("SimpleBatchMergeSelector"), "Selected {} groups to merge", res.size()); + LOG_DEBUG(getLogger("SimpleBatchMergeSelector"), "Selected {} groups to merge", res.size()); return res; } diff --git a/src/Storages/NexusFS/NexusFS.h b/src/Storages/NexusFS/NexusFS.h index 946493a6c5..0cdcbbca17 100644 --- a/src/Storages/NexusFS/NexusFS.h +++ b/src/Storages/NexusFS/NexusFS.h @@ -7,6 +7,7 @@ #include +#include #include #include #include @@ -81,7 +82,7 @@ public: void loadFromConfig(const 
Poco::Util::AbstractConfiguration & conf); private: - Poco::Logger * log = &Poco::Logger::get("NexusFSConfig"); + LoggerPtr log = getLogger("NexusFSConfig"); NexusFSConfig & validate(); File openFile(const std::string & file_name, UInt64 size, bool truncate); @@ -194,7 +195,7 @@ private: UInt32 alignedSize(UInt32 size) const { return roundup(size, alloc_align_size); } NexusFSComponents::NexusFSIndex::LookupResult load(const HybridCache::HashedKey &key, off_t offset_in_source, std::unique_ptr &source, std::shared_ptr &insert_cxt); - + void writeEntry(HybridCache::RelAddress addr, UInt32 slot_size, const HybridCache::HashedKey &key, HybridCache::BufferView value); size_t readEntry(const HybridCache::RegionDescriptor &desc, HybridCache::RelAddress addr, UInt32 size, char *to); @@ -222,7 +223,7 @@ private: // std::shared_ptr loadToMemoryCache(const NexusFSComponents::NexusFSIndex::LookupResult &lr); - Poco::Logger * log = &Poco::Logger::get("NexusFS"); + LoggerPtr log = getLogger("NexusFS"); const Protos::NexusFSConfig serialized_config; diff --git a/src/Storages/PartCacheManager.cpp b/src/Storages/PartCacheManager.cpp index 8ab81be8b3..f5ca5f7cb6 100644 --- a/src/Storages/PartCacheManager.cpp +++ b/src/Storages/PartCacheManager.cpp @@ -88,7 +88,7 @@ PartCacheManager::PartCacheManager(ContextMutablePtr context_, const size_t memo size_t size_of_cached_storage = getContext()->getConfigRef().getUInt("cnch_max_cached_storage", 10000); size_t data_cache_min_lifetime = getContext()->getConfigRef().getUInt("data_cache_min_lifetime", 1800); LOG_DEBUG( - &Poco::Logger::get("PartCacheManager"), + getLogger("PartCacheManager"), "Memory limit is {} bytes, Part cache size is {}, delete bitmap size is {}, storage cache size is {} (in unit).", memory_limit, size_of_cached_parts, @@ -193,7 +193,7 @@ void PartCacheManager::mayUpdateTableMeta(const IStorage & storage, const PairIn catch (...) 
{ /// Handle bytekv exceptions and make sure next time will retry - tryLogCurrentException(&Poco::Logger::get("PartCacheManager::mayUpdateTableMeta")); + tryLogCurrentException(getLogger("PartCacheManager::mayUpdateTableMeta")); meta_ptr->cache_status = CacheStatus::UINIT; throw; } @@ -253,7 +253,7 @@ void PartCacheManager::mayUpdateTableMeta(const IStorage & storage, const PairIn { it->second->cache_status = CacheStatus::UINIT; meta_ptr = it->second; - LOG_DEBUG(&Poco::Logger::get("PartCacheManager::MetaEntry"), "Invalid part cache because of cache version mismatch for table {}.{}", meta_ptr->database, meta_ptr->table); + LOG_DEBUG(getLogger("PartCacheManager::MetaEntry"), "Invalid part cache because of cache version mismatch for table {}.{}", meta_ptr->database, meta_ptr->table); } } } @@ -278,7 +278,7 @@ void PartCacheManager::mayUpdateTableMeta(const IStorage & storage, const PairIn /// Does not interfere with the primary logic. catch (...) { - tryLogCurrentException(&Poco::Logger::get("PartCacheManager::mayUpdateTableMeta")); + tryLogCurrentException(getLogger("PartCacheManager::mayUpdateTableMeta")); } } } @@ -320,7 +320,7 @@ bool PartCacheManager::checkIfCacheValidWithNHUT(const UUID & uuid, const UInt64 /// try invalid the part cache if the cached nhut is old enough; if (table_entry->need_invalid_cache && getContext()->getPhysicalTimestamp() - table_entry->cached_non_host_update_ts > 9000) { - LOG_DEBUG(&Poco::Logger::get("PartCacheManager::getTableMeta"), "invalid part cache for {}. NHUT is {}", UUIDHelpers::UUIDToString(uuid), table_entry->cached_non_host_update_ts); + LOG_DEBUG(getLogger("PartCacheManager::getTableMeta"), "invalid part cache for {}. 
NHUT is {}", UUIDHelpers::UUIDToString(uuid), table_entry->cached_non_host_update_ts); invalidPartAndDeleteBitmapCache(uuid); } @@ -349,7 +349,7 @@ TableMetaEntryPtr PartCacheManager::getTableMeta(const UUID & uuid) std::unique_lock lock(cache_mutex); if (active_tables.find(uuid) == active_tables.end()) { - LOG_TRACE(&Poco::Logger::get("PartCacheManager::getTableMeta"), "Table id {} not found in active_tables", UUIDHelpers::UUIDToString(uuid)); + LOG_TRACE(getLogger("PartCacheManager::getTableMeta"), "Table id {} not found in active_tables", UUIDHelpers::UUIDToString(uuid)); return nullptr; } @@ -579,7 +579,7 @@ void PartCacheManager::invalidCacheWithNewTopology(const CnchServerTopology & to auto server = topology.getTargetServer(UUIDHelpers::UUIDToString(it->first), it->second->server_vw_name); if (!isLocalServer(server.getRPCAddress(), rpc_port)) { - LOG_DEBUG(&Poco::Logger::get("PartCacheManager::invalidCacheWithNewTopology"), "Dropping part cache of {}", UUIDHelpers::UUIDToString(it->first)); + LOG_DEBUG(getLogger("PartCacheManager::invalidCacheWithNewTopology"), "Dropping part cache of {}", UUIDHelpers::UUIDToString(it->first)); part_cache_ptr->dropCache(it->first); delete_bitmap_cache_ptr->dropCache(it->first); storageCachePtr->remove(it->second->database, it->second->table); @@ -640,7 +640,7 @@ void PartCacheManager::invalidPartCacheWithoutLock( } } } - LOG_DEBUG(&Poco::Logger::get("PartCacheManager::invalidPartCacheWithoutLock"), "Dropping part cache of {}", UUIDHelpers::UUIDToString(uuid)); + LOG_DEBUG(getLogger("PartCacheManager::invalidPartCacheWithoutLock"), "Dropping part cache of {}", UUIDHelpers::UUIDToString(uuid)); if (!skip_part_cache) part_cache_ptr->dropCache(uuid); if (!skip_delete_bitmap_cache) @@ -1026,7 +1026,7 @@ void PartCacheManager::loadActiveTables() auto tables_meta = getContext()->getCnchCatalog()->getAllTables(); if (tables_meta.empty()) return; - LOG_DEBUG(&Poco::Logger::get("PartCacheManager"), "Reloading {} active tables.", 
tables_meta.size()); + LOG_DEBUG(getLogger("PartCacheManager"), "Reloading {} active tables.", tables_meta.size()); auto rpc_port = getContext()->getRPCPort(); for (auto & table_meta : tables_meta) @@ -1071,7 +1071,7 @@ static const size_t LOG_PARTS_SIZE = 100000; static void logPartsVector(const MergeTreeMetaBase & storage, const ServerDataPartsVector & res) { if (unlikely(res.size() % LOG_PARTS_SIZE == 0)) - LOG_DEBUG(&Poco::Logger::get("PartCacheManager"), "{} getting parts and now loaded {} parts in memory", storage.getStorageID().getNameForLogs(), res.size()); + LOG_DEBUG(getLogger("PartCacheManager"), "{} getting parts and now loaded {} parts in memory", storage.getStorageID().getNameForLogs(), res.size()); } */ @@ -1198,7 +1198,7 @@ RetValueVec PartCacheManager::getDataInternal( } LOG_DEBUG( - &Poco::Logger::get("PartCacheManager"), + getLogger("PartCacheManager"), "Waiting for loading parts for table {} use {} threads.", storage.getStorageID().getNameForLogs(), max_threads); @@ -1396,7 +1396,7 @@ template < static_assert(DependentFalse::value, "invalid template type for CachePtr"); } - LOG_DEBUG(&Poco::Logger::get("PartCacheManager"), "Get {} by partitions for table : {}", type, storage.getLogName()); + LOG_DEBUG(getLogger("PartCacheManager"), "Get {} by partitions for table : {}", type, storage.getLogName()); Stopwatch watch; UUID uuid = storage.getStorageUUID(); @@ -1533,7 +1533,7 @@ template < lock, std::chrono::milliseconds(5000), [&cache_status]() { return cache_status->isLoaded(); })) { LOG_TRACE( - &Poco::Logger::get("PartCacheManager"), + getLogger("PartCacheManager"), "Wait timeout 5000ms for other thread loading table: {}, partition: {}", storage.getStorageID().getNameForLogs(), partition_id); @@ -1596,7 +1596,7 @@ template < } LOG_DEBUG( - &Poco::Logger::get("PartCacheManager"), + getLogger("PartCacheManager"), "Waiting for loading parts for table {} use {} threads.", storage.getStorageID().getNameForLogs(), max_threads); @@ -1697,7 +1697,7 @@ 
std::unordered_map> PartCacheManager::getTable void PartCacheManager::reset() { - LOG_DEBUG(&Poco::Logger::get("PartCacheManager::reset"), "Resetting part cache manager."); + LOG_DEBUG(getLogger("PartCacheManager::reset"), "Resetting part cache manager."); std::unique_lock lock(cache_mutex); { /// 1. Remember the current size of the active_tables. @@ -1761,7 +1761,7 @@ size_t PartCacheManager::cleanTrashedActiveTables() { void PartCacheManager::shutDown() { - LOG_DEBUG(&Poco::Logger::get("PartCacheManager::shutdown"), "Shutdown method of part cache manager called."); + LOG_DEBUG(getLogger("PartCacheManager::shutdown"), "Shutdown method of part cache manager called."); table_partition_metrics.shutDown(this); active_table_loader->deactivate(); meta_lock_cleaner->deactivate(); @@ -1824,7 +1824,7 @@ bool PartCacheManager::forceRecalculate(StoragePtr table) const auto host_port = getContext()->getCnchTopologyMaster()->getTargetServer( UUIDHelpers::UUIDToString(table->getStorageUUID()), table->getServerVwName(), true); - auto * log = &Poco::Logger::get("PartCacheManager::forceRecalculate"); + auto log = getLogger("PartCacheManager::forceRecalculate"); if (!host_port.empty() && !isLocalServer(host_port.getRPCAddress(), std::to_string(getContext()->getRPCPort()))) { try @@ -1929,7 +1929,7 @@ PartCacheManager::getLastModificationTimeHints(const ConstStoragePtr & storage, if (!data.has_value() || (data->first.total_parts_number < 0 || data->first.total_rows_count < 0)) { LOG_WARNING( - &Poco::Logger::get("PartCacheManager::getLastModificationTimeHints"), + getLogger("PartCacheManager::getLastModificationTimeHints"), "Can not get partition metrics for partition {} from snapshots.", partition); hint.set_last_modification_time(0); diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp index 1fc279bff2..37a37a6b7f 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp +++ 
b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp @@ -27,7 +27,7 @@ MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer( const size_t max_block_size_, bool allow_automatic_update_, Storages storages_) - : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) + : log(getLogger("PostgreSQLReaplicaConsumer")) , context(context_) , replication_slot_name(replication_slot_name_) , publication_name(publication_name_) @@ -65,7 +65,7 @@ void MaterializedPostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storag auto insert_columns = std::make_shared(); auto table_id = storage->getStorageID(); - LOG_TRACE(&Poco::Logger::get("MaterializedPostgreSQLBuffer"), "New buffer for table {}.{} ({}), structure: {}", + LOG_TRACE(getLogger("MaterializedPostgreSQLBuffer"), "New buffer for table {}.{} ({}), structure: {}", table_id.database_name, table_id.table_name, toString(table_id.uuid), sample_block.dumpStructure()); assert(description.sample_block.columns() == storage_columns.size()); diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h index 8f3224784f..04af14629a 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -94,7 +95,7 @@ private: return (static_cast(upper_half) << 32) + lower_half; } - Poco::Logger * log; + LoggerPtr log; ContextPtr context; const std::string replication_slot_name, publication_name; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 141fe5245a..c0cf764e99 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -32,7 +32,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( bool is_attach_, const MaterializedPostgreSQLSettings & 
replication_settings, bool is_materialized_postgresql_database_) - : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) + : log(getLogger("PostgreSQLReplicationHandler")) , context(context_) , is_attach(is_attach_) , remote_database_name(remote_database_name_) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index a04c6b037c..6e7ce21ec5 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include "MaterializedPostgreSQLConsumer.h" #include "MaterializedPostgreSQLSettings.h" #include @@ -80,7 +81,7 @@ private: PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const String & table_name) const; - Poco::Logger * log; + LoggerPtr log; ContextPtr context; /// If it is not attach, i.e. a create query, then if publication already exists - always drop it. diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index c297e1f580..94b8c98b86 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -126,7 +126,7 @@ StoragePtr StorageMaterializedPostgreSQL::createTemporary() const auto tmp_storage = DatabaseCatalog::instance().tryGetTable(tmp_table_id, nested_context); if (tmp_storage) { - LOG_TRACE(&Poco::Logger::get("MaterializedPostgreSQLStorage"), "Temporary table {} already exists, dropping", tmp_table_id.getNameForLogs()); + LOG_TRACE(getLogger("MaterializedPostgreSQLStorage"), "Temporary table {} already exists, dropping", tmp_table_id.getNameForLogs()); InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), getContext(), tmp_table_id, /* no delay */true); } diff --git a/src/Storages/RabbitMQ/RabbitMQConnection.cpp b/src/Storages/RabbitMQ/RabbitMQConnection.cpp index 
7231c74ac2..b560c8fb83 100644 --- a/src/Storages/RabbitMQ/RabbitMQConnection.cpp +++ b/src/Storages/RabbitMQ/RabbitMQConnection.cpp @@ -11,7 +11,7 @@ static const auto CONNECT_SLEEP = 200; static const auto RETRIES_MAX = 20; -RabbitMQConnection::RabbitMQConnection(const RabbitMQConfiguration & configuration_, Poco::Logger * log_) +RabbitMQConnection::RabbitMQConnection(const RabbitMQConfiguration & configuration_, LoggerPtr log_) : configuration(configuration_) , log(log_) , event_handler(loop.getLoop(), log) diff --git a/src/Storages/RabbitMQ/RabbitMQConnection.h b/src/Storages/RabbitMQ/RabbitMQConnection.h index 7cf4cf71fa..2774616516 100644 --- a/src/Storages/RabbitMQ/RabbitMQConnection.h +++ b/src/Storages/RabbitMQ/RabbitMQConnection.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -19,7 +20,7 @@ struct RabbitMQConfiguration class RabbitMQConnection { public: - RabbitMQConnection(const RabbitMQConfiguration & configuration_, Poco::Logger * log_); + RabbitMQConnection(const RabbitMQConfiguration & configuration_, LoggerPtr log_); bool isConnected(); @@ -48,7 +49,7 @@ private: void disconnectImpl(bool immediately = false); RabbitMQConfiguration configuration; - Poco::Logger * log; + LoggerPtr log; UVLoop loop; RabbitMQHandler event_handler; diff --git a/src/Storages/RabbitMQ/RabbitMQHandler.cpp b/src/Storages/RabbitMQ/RabbitMQHandler.cpp index 85d8063a73..bd0c55183d 100644 --- a/src/Storages/RabbitMQ/RabbitMQHandler.cpp +++ b/src/Storages/RabbitMQ/RabbitMQHandler.cpp @@ -8,7 +8,7 @@ namespace DB /* The object of this class is shared between concurrent consumers (who share the same connection == share the same * event loop and handler). 
*/ -RabbitMQHandler::RabbitMQHandler(uv_loop_t * loop_, Poco::Logger * log_) : +RabbitMQHandler::RabbitMQHandler(uv_loop_t * loop_, LoggerPtr log_) : AMQP::LibUvHandler(loop_), loop(loop_), log(log_), diff --git a/src/Storages/RabbitMQ/RabbitMQHandler.h b/src/Storages/RabbitMQ/RabbitMQHandler.h index 52db4a1a8f..150a05c0d9 100644 --- a/src/Storages/RabbitMQ/RabbitMQHandler.h +++ b/src/Storages/RabbitMQ/RabbitMQHandler.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -23,7 +24,7 @@ class RabbitMQHandler : public AMQP::LibUvHandler { public: - RabbitMQHandler(uv_loop_t * loop_, Poco::Logger * log_); + RabbitMQHandler(uv_loop_t * loop_, LoggerPtr log_); void onError(AMQP::TcpConnection * connection, const char * message) override; void onReady(AMQP::TcpConnection * connection) override; @@ -49,7 +50,7 @@ public: private: uv_loop_t * loop; - Poco::Logger * log; + LoggerPtr log; std::atomic connection_running, loop_running; std::atomic loop_state; diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp index f284478e8d..eba429246a 100644 --- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp +++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp @@ -41,7 +41,7 @@ ReadBufferFromRabbitMQConsumer::ReadBufferFromRabbitMQConsumer( std::vector & queues_, size_t channel_id_base_, const String & channel_base_, - Poco::Logger * log_, + LoggerPtr log_, char row_delimiter_, uint32_t queue_size_, const std::atomic & stopped_) diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h index 4725f92d65..9ebda3e113 100644 --- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h +++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -25,7 +26,7 @@ public: std::vector & queues_, size_t channel_id_base_, const String & channel_base_, - 
Poco::Logger * log_, + LoggerPtr log_, char row_delimiter_, uint32_t queue_size_, const std::atomic & stopped_); @@ -92,7 +93,7 @@ private: std::vector queues; const String channel_base; const size_t channel_id_base; - Poco::Logger * log; + LoggerPtr log; char row_delimiter; bool allowed = true; const std::atomic & stopped; diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 4a79dc9486..2f044e57a3 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -90,7 +90,7 @@ StorageRabbitMQ::StorageRabbitMQ( , persistent(rabbitmq_settings->rabbitmq_persistent.value) , use_user_setup(rabbitmq_settings->rabbitmq_queue_consume.value) , hash_exchange(num_consumers > 1 || num_queues > 1) - , log(&Poco::Logger::get("StorageRabbitMQ (" + table_id_.table_name + ")")) + , log(getLogger("StorageRabbitMQ (" + table_id_.table_name + ")")) , semaphore(0, num_consumers) , unique_strbase(getRandomName()) , queue_size(std::max(QUEUE_SIZE, static_cast(getMaxBlockSize()))) diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index 8f341af906..539ab3d868 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -100,7 +101,7 @@ private: bool use_user_setup; bool hash_exchange; - Poco::Logger * log; + LoggerPtr log; RabbitMQConnectionPtr connection; /// Connection for all consumers RabbitMQConfiguration configuration; diff --git a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp index 550f3cd27c..d5afe78315 100644 --- a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp +++ b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp @@ -54,7 +54,7 @@ WriteBufferToRabbitMQProducer::WriteBufferToRabbitMQProducer( const size_t channel_id_base_, const bool persistent_, 
std::atomic & wait_confirm_, - Poco::Logger * log_, + LoggerPtr log_, std::optional delimiter, size_t rows_per_message, size_t chunk_size_) diff --git a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h index 8ed1ea643f..6ab46205d2 100644 --- a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h +++ b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -26,7 +27,7 @@ public: const size_t channel_id_base_, const bool persistent_, std::atomic & wait_confirm_, - Poco::Logger * log_, + LoggerPtr log_, std::optional delimiter, size_t rows_per_message, size_t chunk_size_ @@ -110,7 +111,7 @@ private: /// Record of pending acknowledgements from the server; its size never exceeds size of returned.queue std::map> delivery_record; - Poco::Logger * log; + LoggerPtr log; const std::optional delim; const size_t max_rows; const size_t chunk_size; diff --git a/src/Storages/RemoteDiskCacheService.cpp b/src/Storages/RemoteDiskCacheService.cpp index 79dcd1cac9..6bfcd8b791 100644 --- a/src/Storages/RemoteDiskCacheService.cpp +++ b/src/Storages/RemoteDiskCacheService.cpp @@ -24,14 +24,14 @@ String RemoteDiskCacheService::getFileFullPath(const String & key) auto [disk, path] = disk_cache->get(key); if (!disk) { - LOG_WARNING(&Poco::Logger::get("RemoteDiskCacheService"), "Can't find the cache key: {}", key); + LOG_WARNING(getLogger("RemoteDiskCacheService"), "Can't find the cache key: {}", key); return ""; } if (!disk->exists(path)) { LOG_WARNING( - &Poco::Logger::get("RemoteDiskCacheService"), + getLogger("RemoteDiskCacheService"), "Find the cache key but the cache data path is not exist: {}->{}", key, fullPath(disk, path)); diff --git a/src/Storages/RemoteFile/CnchFileCommon.cpp b/src/Storages/RemoteFile/CnchFileCommon.cpp index 9b42a9892e..df7fe62b77 100644 --- a/src/Storages/RemoteFile/CnchFileCommon.cpp +++ b/src/Storages/RemoteFile/CnchFileCommon.cpp 
@@ -61,7 +61,7 @@ void StorageS3Configuration::updateS3Client(const ContextPtr & ctx, const CnchFi }); LOG_DEBUG( - &Poco::Logger::get("StorageS3Configuration"), + getLogger("StorageS3Configuration"), fmt::format( "update s3 client, config: {}, region = {}, endpoint = {}, bucket = {}, key = {}, ak/sk = {} -> {}", uri.toString(), diff --git a/src/Storages/RemoteFile/IStorageCloudFile.cpp b/src/Storages/RemoteFile/IStorageCloudFile.cpp index 97f5536f49..6b39c6426d 100644 --- a/src/Storages/RemoteFile/IStorageCloudFile.cpp +++ b/src/Storages/RemoteFile/IStorageCloudFile.cpp @@ -75,7 +75,7 @@ public: if (current_path.empty()) return false; - LOG_TRACE(&Poco::Logger::get("FileBlockInputStream"), "{} start to read {}", client->type(), current_path); + LOG_TRACE(getLogger("FileBlockInputStream"), "{} start to read {}", client->type(), current_path); auto current_compression = chooseCompressionMethod(current_path, compression_method); auto current_format = FormatFactory::instance().getFormatFromFileName(current_path, true, format_name); FormatFactory::instance().checkFormatName(current_format); @@ -244,7 +244,7 @@ public: size_t max_block_size_, size_t num_streams_, const CnchFileArguments & arguments_, - Poco::Logger * log_) + LoggerPtr log_) : ISourceStep(DataStream{.header = metadata_snapshot_->getSampleBlockForColumns(real_column_names_, virtual_, id_)}) , client(client_) , data_parts(parts_) @@ -306,7 +306,7 @@ private: size_t num_streams; CnchFileArguments arguments; - Poco::Logger * log; + LoggerPtr log; Pipe read(ContextPtr & query_context_, size_t num_streams_, const Names & column_names, const UInt64 & max_block_size_) { diff --git a/src/Storages/RemoteFile/IStorageCloudFile.h b/src/Storages/RemoteFile/IStorageCloudFile.h index 443f6ddeeb..6d41b580ba 100644 --- a/src/Storages/RemoteFile/IStorageCloudFile.h +++ b/src/Storages/RemoteFile/IStorageCloudFile.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -70,7 +71,7 @@ public: 
FileDataPartsCNCHVector parts{}; private: - Poco::Logger * log = &Poco::Logger::get("IStorageCloudFile"); + LoggerPtr log = getLogger("IStorageCloudFile"); }; } diff --git a/src/Storages/RemoteFile/IStorageCnchFile.h b/src/Storages/RemoteFile/IStorageCnchFile.h index a598746a97..4b39c48e06 100644 --- a/src/Storages/RemoteFile/IStorageCnchFile.h +++ b/src/Storages/RemoteFile/IStorageCnchFile.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -104,7 +105,7 @@ public: Block virtual_header; private: - Poco::Logger * log = &Poco::Logger::get("StorageCnchFile"); + LoggerPtr log = getLogger("StorageCnchFile"); }; } diff --git a/src/Storages/RemoteFile/StorageCloudHDFS.cpp b/src/Storages/RemoteFile/StorageCloudHDFS.cpp index f67298c032..c0c8a6c3e1 100644 --- a/src/Storages/RemoteFile/StorageCloudHDFS.cpp +++ b/src/Storages/RemoteFile/StorageCloudHDFS.cpp @@ -94,7 +94,7 @@ void registerStorageCloudHDFS(StorageFactory & factory) CnchFileSettings settings = args.getContext()->getCnchFileSettings(); settings.loadFromQuery(*args.storage_def); LOG_TRACE( - &Poco::Logger::get("StorageCloudHDFS"), + getLogger("StorageCloudHDFS"), fmt::format( "create cloud hdfs table: database={}, table={}, url={}, format={}, compression={}", database, diff --git a/src/Storages/RemoteFile/StorageCloudHDFS.h b/src/Storages/RemoteFile/StorageCloudHDFS.h index 4ae4806b56..592e40b8d1 100644 --- a/src/Storages/RemoteFile/StorageCloudHDFS.h +++ b/src/Storages/RemoteFile/StorageCloudHDFS.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_HDFS @@ -38,7 +39,7 @@ public: }; - Poco::Logger * log = &Poco::Logger::get("StorageCloudHDFS"); + LoggerPtr log = getLogger("StorageCloudHDFS"); ~StorageCloudHDFS() override = default; diff --git a/src/Storages/RemoteFile/StorageCloudS3.cpp b/src/Storages/RemoteFile/StorageCloudS3.cpp index 01af7579cc..d48456c11b 100644 --- a/src/Storages/RemoteFile/StorageCloudS3.cpp +++ b/src/Storages/RemoteFile/StorageCloudS3.cpp @@ -141,7 +141,7 @@ void 
registerStorageCloudS3(StorageFactory & factory) CnchFileSettings settings = args.getContext()->getCnchFileSettings(); settings.loadFromQuery(*args.storage_def); LOG_TRACE( - &Poco::Logger::get("StorageCloudS3"), + getLogger("StorageCloudS3"), fmt::format( "create cloud S3 table: database={}, table={}, url={}, format={}, compression={}", database, diff --git a/src/Storages/RemoteFile/StorageCloudS3.h b/src/Storages/RemoteFile/StorageCloudS3.h index 810e6b7248..bb2af23bbe 100644 --- a/src/Storages/RemoteFile/StorageCloudS3.h +++ b/src/Storages/RemoteFile/StorageCloudS3.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_AWS_S3 @@ -37,7 +38,7 @@ public: StorageS3Configuration config; private: - Poco::Logger * log = &Poco::Logger::get("StorageCloudS3"); + LoggerPtr log = getLogger("StorageCloudS3"); public: StorageCloudS3( diff --git a/src/Storages/RemoteFile/StorageCnchHDFS.h b/src/Storages/RemoteFile/StorageCnchHDFS.h index f32c7b482c..5ef0523184 100644 --- a/src/Storages/RemoteFile/StorageCnchHDFS.h +++ b/src/Storages/RemoteFile/StorageCnchHDFS.h @@ -1,4 +1,5 @@ #pragma once +#include #include #if USE_HDFS @@ -35,7 +36,7 @@ public: ~StorageCnchHDFS() override = default; private: - Poco::Logger * log = &Poco::Logger::get("StorageCnchHDFS"); + LoggerPtr log = getLogger("StorageCnchHDFS"); public: StorageCnchHDFS( diff --git a/src/Storages/RemoteFile/StorageCnchS3.cpp b/src/Storages/RemoteFile/StorageCnchS3.cpp index 12f40ada45..c3a5bd0cd1 100644 --- a/src/Storages/RemoteFile/StorageCnchS3.cpp +++ b/src/Storages/RemoteFile/StorageCnchS3.cpp @@ -90,7 +90,7 @@ Strings ListKeysWithRegexpMatching( } LOG_TRACE( - &Poco::Logger::get("StorageCnchS3"), + getLogger("StorageCnchS3"), "List {} with prefix `{}`, total keys = {}, filter keys = {} ", globbed_s3_uri.toString(), key_prefix, @@ -199,7 +199,7 @@ void registerStorageCnchS3(StorageFactory & factory) arguments.partition_by = args.storage_def->partition_by->clone(); LOG_TRACE( - &Poco::Logger::get("StorageCnchS3"), + 
getLogger("StorageCnchS3"), fmt::format( "create CNCH S3 table: url={}, format={}, compression={}", arguments.url, diff --git a/src/Storages/RemoteFile/StorageCnchS3.h b/src/Storages/RemoteFile/StorageCnchS3.h index f1171fa87c..55bfe8ca0d 100644 --- a/src/Storages/RemoteFile/StorageCnchS3.h +++ b/src/Storages/RemoteFile/StorageCnchS3.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if USE_AWS_S3 @@ -39,7 +40,7 @@ public: StorageS3Configuration config; private: - Poco::Logger * log = &Poco::Logger::get("StorageCnchS3"); + LoggerPtr log = getLogger("StorageCnchS3"); public: StorageCnchS3( diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index ebdee7efc0..c35156ff38 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -140,7 +140,7 @@ StorageBuffer::StorageBuffer( , flush_thresholds(flush_thresholds_) , destination_id(destination_id_) , allow_materialized(allow_materialized_) - , log(&Poco::Logger::get("StorageBuffer (" + table_id_.getFullTableName() + ")")) + , log(getLogger("StorageBuffer (" + table_id_.getFullTableName() + ")")) , bg_pool(getContext()->getBufferFlushSchedulePool()) { StorageInMemoryMetadata storage_metadata; diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index 22c89db22d..a575721dab 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -152,7 +153,7 @@ private: Writes lifetime_writes; Writes total_writes; - Poco::Logger * log; + LoggerPtr log; void flushAllBuffers(bool check_thresholds = true, bool reset_blocks_structure = false); /// Reset the buffer. 
If check_thresholds is set - resets only if thresholds diff --git a/src/Storages/StorageCnchMergeTree.cpp b/src/Storages/StorageCnchMergeTree.cpp index b8546cbf0b..83584ee130 100644 --- a/src/Storages/StorageCnchMergeTree.cpp +++ b/src/Storages/StorageCnchMergeTree.cpp @@ -158,6 +158,7 @@ static ASTPtr getBasicSelectQuery(const ASTPtr & original_query) else if (select.prewhere()) select.setExpression(ASTSelectQuery::Expression::WHERE, select.prewhere()->clone()); select.setExpression(ASTSelectQuery::Expression::PREWHERE, nullptr); + LOG_DEBUG(getLogger("getBasicSelectQuery"), "original: {}, result: {}", queryToString(original_query), queryToString(select)); return query; } diff --git a/src/Storages/StorageDictCloudMergeTree.cpp b/src/Storages/StorageDictCloudMergeTree.cpp index f9287035f7..b7b464c8df 100644 --- a/src/Storages/StorageDictCloudMergeTree.cpp +++ b/src/Storages/StorageDictCloudMergeTree.cpp @@ -41,7 +41,7 @@ StorageDictCloudMergeTree::StorageDictCloudMergeTree( ) , split_writer(*this, IStorage::StorageLocation::AUXILITY) { - log = &Poco::Logger::get(table_id_.getNameForLogs() + " (DictCloudMergeTree)"); + log = ::getLogger(table_id_.getNameForLogs() + " (DictCloudMergeTree)"); init(); } diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index a6bb101346..ea0d0b54cf 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -476,7 +476,7 @@ StorageDistributed::StorageDistributed( , remote_database(remote_database_) , remote_table(remote_table_) , remote_table_function_ptr(remote_table_function_ptr_) - , log(&Poco::Logger::get("StorageDistributed (" + id_.table_name + ")")) + , log(getLogger("StorageDistributed (" + id_.table_name + ")")) , owned_cluster(std::move(owned_cluster_)) , cluster_name(getContext()->getMacros()->expand(cluster_name_)) , sharding_key(sharding_key_) diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 820af0fdbb..24f2b20195 
100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -237,7 +238,7 @@ private: String remote_table; ASTPtr remote_table_function_ptr; - Poco::Logger * log; + LoggerPtr log; /// Used to implement TableFunctionRemote. std::shared_ptr owned_cluster; diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp index f4a7b39417..791ce54899 100644 --- a/src/Storages/StorageExternalDistributed.cpp +++ b/src/Storages/StorageExternalDistributed.cpp @@ -167,7 +167,7 @@ StorageExternalDistributed::StorageExternalDistributed( shard = std::make_shared( uri, table_id, format_name, format_settings, columns, constraints, String{}, context, compression_method); - LOG_DEBUG(&Poco::Logger::get("StorageURLDistributed"), "Adding URL: {}", url_description); + LOG_DEBUG(getLogger("StorageURLDistributed"), "Adding URL: {}", url_description); } shards.emplace(std::move(shard)); diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h index cbd4169562..19f8e4c22f 100644 --- a/src/Storages/StorageFile.h +++ b/src/Storages/StorageFile.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -102,7 +103,7 @@ private: mutable std::shared_timed_mutex rwlock; - Poco::Logger * log = &Poco::Logger::get("StorageFile"); + LoggerPtr log = getLogger("StorageFile"); /// Total number of bytes to read (sums for multiple files in case of globs). Needed for progress bar. 
size_t total_bytes_to_read = 0; diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 1e8d16e7ab..6a1edf7324 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -102,7 +102,7 @@ StorageMaterializedView::StorageMaterializedView( , WithMutableContext(local_context->getGlobalContext()) , refresh_schedule(query.refresh_strategy) , cache(MaterializedViewVersionedPartCache::getInstance()) - , log(&Poco::Logger::get("StorageMaterializedView")) + , log(getLogger("StorageMaterializedView")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); @@ -1464,14 +1464,14 @@ bool StorageMaterializedView::checkPartitionExpr(StoragePtr target_table, ASTPtr partition_expr->collectIdentifierNames(id_set); for (const auto & name : id_set) { - LOG_TRACE(&Poco::Logger::get("checkPartitionExpr"), "partition_expr name: {}", name); + LOG_TRACE(getLogger("checkPartitionExpr"), "partition_expr name: {}", name); } IdentifierNameSet id_set_target; partition_key.expression_list_ast->collectIdentifierNames(id_set_target); for (const auto & name : id_set_target) { - LOG_TRACE(&Poco::Logger::get("checkPartitionExpr"), "target table partition key name: {}", name); + LOG_TRACE(getLogger("checkPartitionExpr"), "target table partition key name: {}", name); if (id_set.count(name)) return true; } @@ -1497,7 +1497,7 @@ void StorageMaterializedView::refreshWhere(ASTPtr partition_expr, ContextMutable if (!cnch_select_table) throw Exception("Materialized view select table is not CnchMergeTree", ErrorCodes::LOGICAL_ERROR); - LOG_DEBUG(&Poco::Logger::get("refreshWhere"), "partition_expr: {}", serializeAST(*partition_expr)); + LOG_DEBUG(getLogger("refreshWhere"), "partition_expr: {}", serializeAST(*partition_expr)); if (!checkPartitionExpr(target_table, partition_expr, local_context)) throw Exception("Refresh Materialized view without partition key", ErrorCodes::LOGICAL_ERROR); diff 
--git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 806e8d6b08..35dc07e389 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include @@ -188,7 +189,7 @@ private: // mv meta cache MaterializedViewVersionedPartCache & cache; - Poco::Logger * log; + LoggerPtr log; protected: StorageMaterializedView( diff --git a/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp index 6a680b609c..d94d777ae7 100644 --- a/src/Storages/StorageMySQL.cpp +++ b/src/Storages/StorageMySQL.cpp @@ -68,7 +68,7 @@ StorageMySQL::StorageMySQL( , on_duplicate_clause{on_duplicate_clause_} , mysql_settings(mysql_settings_) , pool(std::make_shared(pool_)) - , logger(&Poco::Logger::get(getStorageID().getNameForLogs())) + , logger(getLogger(getStorageID().getNameForLogs())) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/StorageMySQL.h b/src/Storages/StorageMySQL.h index d0f7a6032b..7e3413cff1 100644 --- a/src/Storages/StorageMySQL.h +++ b/src/Storages/StorageMySQL.h @@ -1,6 +1,7 @@ #pragma once #if !defined(ARCADIA_BUILD) +#include #include "config_core.h" #endif @@ -67,7 +68,7 @@ private: mysqlxx::PoolWithFailoverPtr pool; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 74350add3d..67344a9a92 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -849,7 +849,7 @@ void StorageReplicatedMergeTree::drop() dropAllData(); } -void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const String & replica, Poco::Logger * logger) +void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const String & replica, LoggerPtr logger) { if 
(zookeeper->expired()) throw Exception("Table was not dropped because ZooKeeper session has expired.", ErrorCodes::TABLE_WAS_NOT_DROPPED); @@ -914,7 +914,7 @@ void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, con } bool StorageReplicatedMergeTree::removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr zookeeper, - const String & zookeeper_path, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, Poco::Logger * logger) + const String & zookeeper_path, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, LoggerPtr logger) { bool completely_removed = false; Strings children; diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index bdb72ddc3f..2c067d6dee 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -21,6 +21,7 @@ #pragma once +#include #include #include #include @@ -230,11 +231,11 @@ public: /** Remove a specific replica from zookeeper. */ - static void dropReplica(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const String & replica, Poco::Logger * logger); + static void dropReplica(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const String & replica, LoggerPtr logger); /// Removes table from ZooKeeper after the last replica was dropped static bool removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, - const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, Poco::Logger * logger); + const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, LoggerPtr logger); /// Schedules job to execute in background pool (merge, mutate, drop range and so on) bool scheduleDataProcessingJob(IBackgroundJobExecutor & executor) override; diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index d3da48d64c..29f0acfd82 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -599,7 +599,7 @@ bool 
StorageS3Source::initialize(size_t idx) request_settings.max_single_read_retries), chooseCompressionMethod(key_with_info->key, compression_hint)); - LOG_DEBUG(&Poco::Logger::get("StorageS3Source"), "max parsing threads = {} need_only_count = {}", max_parsing_threads, need_only_count); + LOG_DEBUG(getLogger("StorageS3Source"), "max parsing threads = {} need_only_count = {}", max_parsing_threads, need_only_count); auto input_format = FormatFactory::instance().getInput( format, *read_buf, sample_block, getContext(), max_block_size); @@ -1188,7 +1188,7 @@ void StorageS3::truncate(const ASTPtr & /* query */, const StorageMetadataPtr &, } for (const auto & error : response.GetResult().GetErrors()) - LOG_WARNING(&Poco::Logger::get("StorageS3"), "Failed to delete {}, error: {}", error.GetKey(), error.GetMessage()); + LOG_WARNING(getLogger("StorageS3"), "Failed to delete {}, error: {}", error.GetKey(), error.GetMessage()); } namespace @@ -1234,7 +1234,7 @@ namespace /// For default mode check cached columns for currently read keys on first iteration. 
if (first && getContext()->getSettingsRef().schema_inference_mode == SchemaInferenceMode::DEFAULT) { - LOG_TRACE(&Poco::Logger::get("StorageS3Source"), "ReadBufferIterator first get columns from cache."); + LOG_TRACE(getLogger("StorageS3Source"), "ReadBufferIterator first get columns from cache."); if (auto cached_columns = tryGetColumnsFromCache(read_keys.begin(), read_keys.end())) return {nullptr, cached_columns, format}; @@ -1265,7 +1265,7 @@ namespace return {nullptr, std::nullopt, format}; } - LOG_TRACE(&Poco::Logger::get("StorageS3Source"), "ReadBufferIterator read_keys size {} prev_read_keys_size = {}", read_keys.size(), prev_read_keys_size); + LOG_TRACE(getLogger("StorageS3Source"), "ReadBufferIterator read_keys size {} prev_read_keys_size = {}", read_keys.size(), prev_read_keys_size); /// S3 file iterator could get new keys after new iteration if (read_keys.size() > prev_read_keys_size) @@ -1308,7 +1308,7 @@ namespace } } - LOG_TRACE(&Poco::Logger::get("StorageS3Source"), "ReadBufferFromS3 bucket {} key {}", configuration.url.bucket, current_key_with_info->key); + LOG_TRACE(getLogger("StorageS3Source"), "ReadBufferFromS3 bucket {} key {}", configuration.url.bucket, current_key_with_info->key); auto impl = std::make_unique( configuration.client, @@ -1473,7 +1473,7 @@ std::pair StorageS3::getTableStructureAndFormatFromD const ContextPtr & ctx) { KeysWithInfo read_keys; - LOG_TRACE(&Poco::Logger::get("StorageS3"), " getTableStructureAndFormatFromDataImpl start createFileIterator "); + LOG_TRACE(getLogger("StorageS3"), " getTableStructureAndFormatFromDataImpl start createFileIterator "); auto file_iterator = createFileIterator(configuration, false, ctx, {}, &read_keys); ReadBufferIterator read_buffer_iterator(file_iterator, read_keys, configuration, format, format_settings, ctx); diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 318be78ce0..d7a228ff3c 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ 
-218,7 +218,7 @@ void StorageSetOrJoinBase::restoreFromFile(const String & file_path) backup_stream.readSuffix(); /// TODO Add speed, compressed bytes, data volume in memory, compression ratio ... Generalize all statistics logging in project. - LOG_INFO(&Poco::Logger::get("StorageSetOrJoinBase"), "Loaded from backup file {}. {} rows, {}. State has {} unique rows.", + LOG_INFO(getLogger("StorageSetOrJoinBase"), "Loaded from backup file {}. {} rows, {}. State has {} unique rows.", file_path, backup_stream.getProfileInfo().rows, ReadableSize(backup_stream.getProfileInfo().bytes), getSize()); } diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index 02f0f411b0..c56ada479a 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -286,7 +286,7 @@ StorageStripeLog::StorageStripeLog( , table_path(relative_path_) , max_compress_block_size(max_compress_block_size_) , file_checker(disk, table_path + "sizes.json") - , log(&Poco::Logger::get("StorageStripeLog")) + , log(getLogger("StorageStripeLog")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h index 8ae8dda41b..f7d5d6525c 100644 --- a/src/Storages/StorageStripeLog.h +++ b/src/Storages/StorageStripeLog.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -71,7 +72,7 @@ private: FileChecker file_checker; std::shared_timed_mutex rwlock; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/StorageTinyLog.cpp b/src/Storages/StorageTinyLog.cpp index cf71d0620f..b45baa0e8d 100644 --- a/src/Storages/StorageTinyLog.cpp +++ b/src/Storages/StorageTinyLog.cpp @@ -416,7 +416,7 @@ StorageTinyLog::StorageTinyLog( , table_path(relative_path_) , max_compress_block_size(max_compress_block_size_) , file_checker(disk, table_path + "sizes.json") - , log(&Poco::Logger::get("StorageTinyLog")) + , log(getLogger("StorageTinyLog")) { 
StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/StorageTinyLog.h b/src/Storages/StorageTinyLog.h index 96955d7119..91d0b954c9 100644 --- a/src/Storages/StorageTinyLog.h +++ b/src/Storages/StorageTinyLog.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -73,7 +74,7 @@ private: FileChecker file_checker; std::shared_timed_mutex rwlock; - Poco::Logger * log; + LoggerPtr log; void addFiles(const NameAndTypePair & column); }; diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 871379a885..e87c953969 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -354,7 +354,7 @@ StorageURLWithFailover::StorageURLWithFailover( Poco::URI poco_uri(uri_option); context_->getRemoteHostFilter().checkURL(poco_uri); uri_options.emplace_back(std::move(poco_uri)); - LOG_DEBUG(&Poco::Logger::get("StorageURLDistributed"), "Adding URL option: {}", uri_option); + LOG_DEBUG(getLogger("StorageURLDistributed"), "Adding URL option: {}", uri_option); } } diff --git a/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp index 313ab36182..53249bc440 100644 --- a/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -47,7 +47,7 @@ StorageXDBC::StorageXDBC( , remote_database_name(remote_database_name_) , remote_table_name(remote_table_name_) { - log = &Poco::Logger::get("Storage" + bridge_helper->getName()); + log = getLogger("Storage" + bridge_helper->getName()); uri = bridge_helper->getMainURI(); } diff --git a/src/Storages/StorageXDBC.h b/src/Storages/StorageXDBC.h index 0f40c2627e..604b8a6ac1 100644 --- a/src/Storages/StorageXDBC.h +++ b/src/Storages/StorageXDBC.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -42,7 +43,7 @@ private: std::string remote_database_name; std::string remote_table_name; - Poco::Logger * log; + LoggerPtr log; std::string getReadMethod() const override; diff --git a/src/Storages/System/StorageSystemCnchAsyncQueries.cpp 
b/src/Storages/System/StorageSystemCnchAsyncQueries.cpp index 99a3db7836..a4cd48decb 100644 --- a/src/Storages/System/StorageSystemCnchAsyncQueries.cpp +++ b/src/Storages/System/StorageSystemCnchAsyncQueries.cpp @@ -53,12 +53,12 @@ void StorageSystemCnchAsyncQueries::fillData(MutableColumns & res_columns, Conte } else { - LOG_TRACE(&Poco::Logger::get(getName()), "return empty result with async_query_id {}", selected_id); + LOG_TRACE(getLogger(getName()), "return empty result with async_query_id {}", selected_id); } } else { - LOG_TRACE(&Poco::Logger::get(getName()), "doesn't do any filtering"); + LOG_TRACE(getLogger(getName()), "doesn't do any filtering"); } } } diff --git a/src/Storages/System/StorageSystemCnchColumns.cpp b/src/Storages/System/StorageSystemCnchColumns.cpp index 0ea06e5af3..28ded561dc 100644 --- a/src/Storages/System/StorageSystemCnchColumns.cpp +++ b/src/Storages/System/StorageSystemCnchColumns.cpp @@ -183,7 +183,7 @@ void StorageSystemCnchColumns::fillData(MutableColumns & res_columns, ContextPtr UInt64 time_pass_ms = stop_watch.elapsedMilliseconds(); if (time_pass_ms > 2000) - LOG_INFO(&Poco::Logger::get("StorageSystemCnchColumns"), + LOG_INFO(getLogger("StorageSystemCnchColumns"), "cnch_catalog->getAllTables() took {} ms", time_pass_ms); const auto access = context->getAccess(); diff --git a/src/Storages/System/StorageSystemCnchCommon.cpp b/src/Storages/System/StorageSystemCnchCommon.cpp index 23e7784547..2dd6744518 100644 --- a/src/Storages/System/StorageSystemCnchCommon.cpp +++ b/src/Storages/System/StorageSystemCnchCommon.cpp @@ -82,6 +82,6 @@ std::vector> DB::filterTables(const ContextPt for (size_t i = 0; i < database_column_res->size(); ++i) res.emplace_back((*database_fullname_column_res)[i].get(), (*database_column_res)[i].get(), (*table_name_column_res)[i].get()); - LOG_DEBUG(&Poco::Logger::get("SystemCnchParts"), "Got {} tables from catalog after filter", res.size()); + LOG_DEBUG(getLogger("SystemCnchParts"), "Got {} tables from 
catalog after filter", res.size()); return res; } diff --git a/src/Storages/System/StorageSystemCnchKafkaTables.cpp b/src/Storages/System/StorageSystemCnchKafkaTables.cpp index 4033151351..5efd8ac3c2 100644 --- a/src/Storages/System/StorageSystemCnchKafkaTables.cpp +++ b/src/Storages/System/StorageSystemCnchKafkaTables.cpp @@ -116,7 +116,7 @@ Pipe StorageSystemCnchKafkaTables::read( query_info.cluster = context->mockCnchServersCluster(); QueryPlan query_plan; - Poco::Logger * log = &Poco::Logger::get("SystemCnchKafkaTables"); + LoggerPtr log = getLogger("SystemCnchKafkaTables"); ClusterProxy::executeQuery(query_plan, select_stream_factory, log, select_query, context, query_info, nullptr, {}, nullptr); return query_plan.convertToPipe(QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); diff --git a/src/Storages/System/StorageSystemCnchMaterializedMySQL.cpp b/src/Storages/System/StorageSystemCnchMaterializedMySQL.cpp index 9e6c4bab52..94df5d913f 100644 --- a/src/Storages/System/StorageSystemCnchMaterializedMySQL.cpp +++ b/src/Storages/System/StorageSystemCnchMaterializedMySQL.cpp @@ -104,7 +104,7 @@ Pipe StorageSystemCnchMaterializedMySQL::read( query_info.cluster = context->mockCnchServersCluster(); QueryPlan query_plan; - Poco::Logger * log = &Poco::Logger::get("SystemCnchMaterializedMySQL"); + LoggerPtr log = getLogger("SystemCnchMaterializedMySQL"); ClusterProxy::executeQuery(query_plan, select_stream_factory, log, select_query, context, query_info, nullptr, {}, nullptr); return query_plan.convertToPipe(QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); diff --git a/src/Storages/System/StorageSystemCnchParts.cpp b/src/Storages/System/StorageSystemCnchParts.cpp index c788363996..3ca33a4166 100644 --- a/src/Storages/System/StorageSystemCnchParts.cpp +++ b/src/Storages/System/StorageSystemCnchParts.cpp @@ -122,7 +122,7 @@ void 
StorageSystemCnchParts::fillData(MutableColumns & res_columns, ContextPtr c only_selected_table = table_it->second.getType() == Field::Types::String ? table_it->second.get() : ""; enable_filter_by_table = true; - LOG_TRACE(&Poco::Logger::get("StorageSystemCnchParts"), + LOG_TRACE(getLogger("StorageSystemCnchParts"), "filtering from catalog by table with db name {} and table name {}", only_selected_db, only_selected_table); } @@ -132,14 +132,14 @@ void StorageSystemCnchParts::fillData(MutableColumns & res_columns, ContextPtr c only_selected_partition_id = partition_it->second.getType() == Field::Types::String ? partition_it->second.get() : ""; enable_filter_by_partition = true; - LOG_TRACE(&Poco::Logger::get("StorageSystemCnchParts"), + LOG_TRACE(getLogger("StorageSystemCnchParts"), "filtering from catalog by partition with partition name {}", only_selected_partition_id); } } if (!(enable_filter_by_partition || enable_filter_by_table)) - LOG_TRACE(&Poco::Logger::get("StorageSystemCnchParts"), "No explicitly table and partition provided in where expression"); + LOG_TRACE(getLogger("StorageSystemCnchParts"), "No explicitly table and partition provided in where expression"); // check for required structure of WHERE clause for cnch_parts if (!enable_filter_by_table) diff --git a/src/Storages/System/StorageSystemCnchPartsColumns.cpp b/src/Storages/System/StorageSystemCnchPartsColumns.cpp index 4fad82f872..718cb4a507 100644 --- a/src/Storages/System/StorageSystemCnchPartsColumns.cpp +++ b/src/Storages/System/StorageSystemCnchPartsColumns.cpp @@ -54,7 +54,7 @@ NamesAndTypesList StorageSystemCnchPartsColumns::getNamesAndTypes() void StorageSystemCnchPartsColumns::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const { - Poco::Logger * log = &Poco::Logger::get(getName()); + LoggerPtr log = getLogger(getName()); ASTPtr where_expression = query_info.query->as()->where(); diff --git 
a/src/Storages/System/StorageSystemCnchPartsInfo.cpp b/src/Storages/System/StorageSystemCnchPartsInfo.cpp index 47b1183d23..3215da6162 100644 --- a/src/Storages/System/StorageSystemCnchPartsInfo.cpp +++ b/src/Storages/System/StorageSystemCnchPartsInfo.cpp @@ -88,7 +88,7 @@ Pipe StorageSystemCnchPartsInfo::read( Block header = materializeBlock(InterpreterSelectQuery(ast, context, QueryProcessingStage::Complete).getSampleBlock()); QueryPlan query_plan; - Poco::Logger * log = &Poco::Logger::get("SystemPartsInfo"); + LoggerPtr log = getLogger("SystemPartsInfo"); ClusterProxy::SelectStreamFactory stream_factory = ClusterProxy::SelectStreamFactory( header, diff --git a/src/Storages/System/StorageSystemCnchPartsInfoLocal.cpp b/src/Storages/System/StorageSystemCnchPartsInfoLocal.cpp index 1913d912c0..9f678caeed 100644 --- a/src/Storages/System/StorageSystemCnchPartsInfoLocal.cpp +++ b/src/Storages/System/StorageSystemCnchPartsInfoLocal.cpp @@ -202,7 +202,7 @@ Pipe StorageSystemCnchPartsInfoLocal::read( { if (e.code() == ErrorCodes::CANNOT_GET_TABLE_LOCK && storage) LOG_WARNING( - &Poco::Logger::get("PartsInfoLocal"), + getLogger("PartsInfoLocal"), "Failed to get parts info for table {} because cannot get table lock, skip it.", storage->getStorageID().getFullTableName()); } diff --git a/src/Storages/System/StorageSystemCnchStagedParts.cpp b/src/Storages/System/StorageSystemCnchStagedParts.cpp index 35e1a12505..1ea9e71a10 100644 --- a/src/Storages/System/StorageSystemCnchStagedParts.cpp +++ b/src/Storages/System/StorageSystemCnchStagedParts.cpp @@ -89,7 +89,7 @@ void StorageSystemCnchStagedParts::fillData(MutableColumns & res_columns, Contex only_selected_db = db_it->second.getType() == Field::Types::String ? db_it->second.get() : ""; only_selected_table = table_it->second.getType() == Field::Types::String ? 
table_it->second.get() : ""; enable_filter_by_table = true; - LOG_TRACE(&Poco::Logger::get("StorageSystemCnchStagedParts"), + LOG_TRACE(getLogger("StorageSystemCnchStagedParts"), "filtering from catalog by table with db name {} and table name {}", only_selected_db, only_selected_table); } diff --git a/src/Storages/System/StorageSystemCnchTableHost.cpp b/src/Storages/System/StorageSystemCnchTableHost.cpp index 38dc3d4dd5..9205fd79d5 100644 --- a/src/Storages/System/StorageSystemCnchTableHost.cpp +++ b/src/Storages/System/StorageSystemCnchTableHost.cpp @@ -65,7 +65,7 @@ void StorageSystemCnchTableHost::fillData(MutableColumns & res_columns, ContextP only_selected_database = db_it->second.getType() == Field::Types::String ? db_it->second.get() : ""; only_selected_table = table_it->second.getType() == Field::Types::String ? table_it->second.get() : ""; enable_filter_by_database_and_table = true; - LOG_TRACE(&Poco::Logger::get("StorageSystemCnchTableHost"), + LOG_TRACE(getLogger("StorageSystemCnchTableHost"), "filtering by db and table with db name {} and table name {}", only_selected_database, only_selected_table); } @@ -73,11 +73,11 @@ void StorageSystemCnchTableHost::fillData(MutableColumns & res_columns, ContextP { only_selected_database = db_it->second.getType() == Field::Types::String ? 
db_it->second.get() : ""; enable_filter_by_db = true; - LOG_TRACE(&Poco::Logger::get("StorageSystemCnchTableHost"), + LOG_TRACE(getLogger("StorageSystemCnchTableHost"), "filtering by db with db name {}", only_selected_database); } else - LOG_TRACE(&Poco::Logger::get("StorageSystemCnchTableHost"), "doesn't do any filtering"); + LOG_TRACE(getLogger("StorageSystemCnchTableHost"), "doesn't do any filtering"); } Catalog::CatalogPtr cnch_catalog = context->getCnchCatalog(); diff --git a/src/Storages/System/StorageSystemCnchTables.cpp b/src/Storages/System/StorageSystemCnchTables.cpp index 50f86ee5ab..da794c859b 100644 --- a/src/Storages/System/StorageSystemCnchTables.cpp +++ b/src/Storages/System/StorageSystemCnchTables.cpp @@ -309,7 +309,7 @@ Pipe StorageSystemCnchTables::read( } catch (...) { - tryLogCurrentException(&Poco::Logger::get("StorageSystemCnchTables")); + tryLogCurrentException(getLogger("StorageSystemCnchTables")); } if (!storage) continue; diff --git a/src/Storages/System/StorageSystemCnchTrashItemsInfo.cpp b/src/Storages/System/StorageSystemCnchTrashItemsInfo.cpp index ac6eafd2de..b8066e3cc4 100644 --- a/src/Storages/System/StorageSystemCnchTrashItemsInfo.cpp +++ b/src/Storages/System/StorageSystemCnchTrashItemsInfo.cpp @@ -58,7 +58,7 @@ Pipe StorageSystemCnchTrashItemsInfo::read( Block header = materializeBlock(InterpreterSelectQuery(ast, context, QueryProcessingStage::Complete).getSampleBlock()); QueryPlan query_plan; - Poco::Logger * log = &Poco::Logger::get("SystemTrashItemsInfo"); + LoggerPtr log = getLogger("SystemTrashItemsInfo"); ClusterProxy::SelectStreamFactory stream_factory = ClusterProxy::SelectStreamFactory( header, {}, {}, QueryProcessingStage::Complete, StorageID{"system", "cnch_trash_items_info_local"}, Scalars{}, false, {}); diff --git a/src/Storages/System/StorageSystemCnchTrashItemsInfoLocal.cpp b/src/Storages/System/StorageSystemCnchTrashItemsInfoLocal.cpp index 0b3163978c..be32326aff 100644 --- 
a/src/Storages/System/StorageSystemCnchTrashItemsInfoLocal.cpp +++ b/src/Storages/System/StorageSystemCnchTrashItemsInfoLocal.cpp @@ -138,7 +138,7 @@ Pipe StorageSystemCnchTrashItemsInfoLocal::read( { if (e.code() == ErrorCodes::CANNOT_GET_TABLE_LOCK && storage) LOG_WARNING( - &Poco::Logger::get("TrashItemsInfoLocal"), + getLogger("TrashItemsInfoLocal"), "Failed to get parts info for table {} because cannot get table lock, skip it.", storage->getStorageID().getFullTableName()); } diff --git a/src/Storages/System/StorageSystemCnchViewTables.cpp b/src/Storages/System/StorageSystemCnchViewTables.cpp index 33d37b4a3d..f4aa373218 100644 --- a/src/Storages/System/StorageSystemCnchViewTables.cpp +++ b/src/Storages/System/StorageSystemCnchViewTables.cpp @@ -171,7 +171,7 @@ Pipe StorageSystemCnchViewTables::read( } catch (...) { - tryLogCurrentException(&Poco::Logger::get("StorageSystemCnchViewTables")); + tryLogCurrentException(getLogger("StorageSystemCnchViewTables")); continue; } if (!mv) diff --git a/src/Storages/System/StorageSystemHuAllocStats.cpp b/src/Storages/System/StorageSystemHuAllocStats.cpp index c05cc39d7e..4b3ed735e9 100644 --- a/src/Storages/System/StorageSystemHuAllocStats.cpp +++ b/src/Storages/System/StorageSystemHuAllocStats.cpp @@ -79,7 +79,7 @@ Pipe StorageSystemHuAllocStats::read( res_columns.at(col_num++)->insert(GetTotalGiantAlloc()); res_columns.at(col_num++)->insert(GetTotalGiantFree()); #else - LOG_INFO(&Poco::Logger::get("StorageSystemHuAllocStats"), "HuAlloc is not enabled"); + LOG_INFO(getLogger("StorageSystemHuAllocStats"), "HuAlloc is not enabled"); #endif // USE_HUALLOC UInt64 num_rows = res_columns.at(0)->size(); diff --git a/src/Storages/System/StorageSystemMutations.cpp b/src/Storages/System/StorageSystemMutations.cpp index a101298783..3a3b5a72f7 100644 --- a/src/Storages/System/StorageSystemMutations.cpp +++ b/src/Storages/System/StorageSystemMutations.cpp @@ -107,7 +107,7 @@ void StorageSystemMutations::fillCnchData(MutableColumns & 
res_columns, ContextP if (!filtered_block.rows()) { - LOG_DEBUG(&Poco::Logger::get(__PRETTY_FUNCTION__), "No need to process any tables."); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "No need to process any tables."); return ; } diff --git a/src/Storages/System/StorageSystemQueryCache.cpp b/src/Storages/System/StorageSystemQueryCache.cpp index 9051125561..fbb8cf5504 100644 --- a/src/Storages/System/StorageSystemQueryCache.cpp +++ b/src/Storages/System/StorageSystemQueryCache.cpp @@ -35,7 +35,7 @@ void StorageSystemQueryCache::fillData(MutableColumns & res_columns, ContextPtr std::vector content = query_cache->dump(); const String & user_name = context->getUserName(); - LOG_DEBUG(&Poco::Logger::get("StorageSystemQueryCache"), "user {}, number of entries {}", user_name, content.size()); + LOG_DEBUG(getLogger("StorageSystemQueryCache"), "user {}, number of entries {}", user_name, content.size()); for (const auto & [key, query_result] : content) { diff --git a/src/Storages/System/StorageSystemResourceGroups.cpp b/src/Storages/System/StorageSystemResourceGroups.cpp index bcf5360c42..70e5a5fe20 100644 --- a/src/Storages/System/StorageSystemResourceGroups.cpp +++ b/src/Storages/System/StorageSystemResourceGroups.cpp @@ -93,7 +93,7 @@ void StorageSystemResourceGroups::fillData(MutableColumns & res_columns, Context res_columns[i++]->insert(info.last_used / 1000); else { - LOG_WARNING(&Poco::Logger::get("StorageSystemResourceGroups"), "last_used unset. This should not happen"); + LOG_WARNING(getLogger("StorageSystemResourceGroups"), "last_used unset. 
This should not happen"); res_columns[i++]->insert(info.last_used); } res_columns[i++]->insert(info.in_use); diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index 7a8ee75803..b2c8ac50a1 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -152,7 +152,7 @@ namespace StorageSystemStackTrace::StorageSystemStackTrace(const StorageID & table_id_) : IStorageSystemOneBlock(table_id_) - , log(&Poco::Logger::get("StorageSystemStackTrace")) + , log(getLogger("StorageSystemStackTrace")) { notification_pipe.open(); diff --git a/src/Storages/System/StorageSystemStackTrace.h b/src/Storages/System/StorageSystemStackTrace.h index 460a9ec74f..32494a533e 100644 --- a/src/Storages/System/StorageSystemStackTrace.h +++ b/src/Storages/System/StorageSystemStackTrace.h @@ -2,6 +2,7 @@ #ifdef OS_LINUX /// Because of 'sigqueue' functions and RT signals. +#include #include #include #include @@ -36,7 +37,7 @@ protected: mutable std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/TSO/TSOClient.cpp b/src/TSO/TSOClient.cpp index 3313c9cb5d..ccc6d426c4 100644 --- a/src/TSO/TSOClient.cpp +++ b/src/TSO/TSOClient.cpp @@ -88,7 +88,7 @@ GetTimestampsResp TSOClient::getTimestamps(UInt32 size) UInt64 getTSOResponse(const Context & context, TSORequestType type, size_t size) { - static auto * log = &Poco::Logger::get("getTSOResponse"); + static auto log = getLogger("getTSOResponse"); ProfileEventTimeIncrement watch(ProfileEvents::TSORequestMicroseconds); const auto & config = context.getRootConfig(); diff --git a/src/TSO/TSOImpl.h b/src/TSO/TSOImpl.h index cb39eed70e..8b2460cc6e 100644 --- a/src/TSO/TSOImpl.h +++ b/src/TSO/TSOImpl.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -86,7 +87,7 @@ public: private: std::atomic ts = 0; std::atomic_bool is_kv_down{false}; - Poco::Logger * log = &Poco::Logger::get("TSOImpl"); + LoggerPtr 
log = getLogger("TSOImpl"); std::atomic logical_clock_checking {false}; std::atomic num_tso_update_timestamp_stopped_functioning{0}; diff --git a/src/TSO/TSOProxy.h b/src/TSO/TSOProxy.h index 73af6ec2c1..ca3649928d 100644 --- a/src/TSO/TSOProxy.h +++ b/src/TSO/TSOProxy.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include @@ -27,10 +28,10 @@ namespace TSO class TSOProxy { public: - explicit TSOProxy(std::shared_ptr metastore_ptr_, std::string key_, Poco::Logger * logger_) + explicit TSOProxy(std::shared_ptr metastore_ptr_, std::string key_) : metastore_ptr(std::move(metastore_ptr_)) , key(std::move(key_)) - , log{logger_} + , log{getLogger("TSOProxy")} {} ~TSOProxy() = default; @@ -42,7 +43,7 @@ public: private: std::shared_ptr metastore_ptr; std::string key; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/TSO/TSOServer.cpp b/src/TSO/TSOServer.cpp index bac45a11ae..77bf86304c 100644 --- a/src/TSO/TSOServer.cpp +++ b/src/TSO/TSOServer.cpp @@ -86,7 +86,7 @@ void TSOServer::initialize(Poco::Util::Application & self) { BaseDaemon::initialize(self); - log = &logger(); + log = getLogger(logger()); registerServiceDiscovery(); @@ -195,7 +195,7 @@ void TSOServer::updateTSO() update_tso_task->scheduleAfter(TSO_UPDATE_INTERVAL); } -Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) +Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, LoggerRawPtr log) { Poco::Net::SocketAddress socket_address; try @@ -345,7 +345,7 @@ int TSOServer::main(const std::vector &) auto metastore_conf = MetastoreConfig{config(), TSO_SERVICE_CONFIGURE}; auto tso_metastore = Catalog::getMetastorePtr(metastore_conf); - proxy_ptr = std::make_shared(std::move(tso_metastore), metastore_conf.key_name, log); + proxy_ptr = std::make_shared(std::move(tso_metastore), metastore_conf.key_name); tso_service = std::make_shared(*this); bool listen_try = config().getBool("listen_try", false); diff --git 
a/src/TSO/TSOServer.h b/src/TSO/TSOServer.h index 759a39b691..ef007f4bb2 100644 --- a/src/TSO/TSOServer.h +++ b/src/TSO/TSOServer.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -98,7 +99,7 @@ protected: private: friend class TSOImpl; - Poco::Logger * log; + LoggerPtr log; size_t tso_window; diff --git a/src/TableFunctions/TableFunctionCnchHive.h b/src/TableFunctions/TableFunctionCnchHive.h index 0027fb18c5..1063ee948e 100644 --- a/src/TableFunctions/TableFunctionCnchHive.h +++ b/src/TableFunctions/TableFunctionCnchHive.h @@ -1,4 +1,5 @@ #pragma once +#include #include "Common/config.h" #if USE_HIVE @@ -24,7 +25,7 @@ public: void parseArguments(const ASTPtr & ast_function_, ContextPtr context_) override; private: - Poco::Logger * logger = &Poco::Logger::get("TableFunctionHive"); + LoggerPtr logger = getLogger("TableFunctionHive"); String cluster_name; String hive_metastore_url; diff --git a/src/TableFunctions/TableFunctionS3.cpp b/src/TableFunctions/TableFunctionS3.cpp index 11176da7fb..8961056396 100644 --- a/src/TableFunctions/TableFunctionS3.cpp +++ b/src/TableFunctions/TableFunctionS3.cpp @@ -116,7 +116,7 @@ void TableFunctionS3::parseArgumentsImpl(ASTs & args, const ContextPtr & context else { auto fourth_arg = checkAndGetLiteralArgument(args[3], "format/session_token"); - LOG_TRACE(&Poco::Logger::get("TableFunctionS3"), " config fourth_arg = {}", fourth_arg); + LOG_TRACE(getLogger("TableFunctionS3"), " config fourth_arg = {}", fourth_arg); if (fourth_arg == "auto" || FormatFactory::instance().exists(fourth_arg)) { @@ -182,7 +182,7 @@ void TableFunctionS3::parseArgumentsImpl(ASTs & args, const ContextPtr & context if (args_to_idx.contains("format")) { - LOG_TRACE(&Poco::Logger::get("TableFunctionS3"), " args_to_idx contain format"); + LOG_TRACE(getLogger("TableFunctionS3"), " args_to_idx contain format"); auto format = checkAndGetLiteralArgument(args[args_to_idx["format"]], "format"); /// Set format to configuration only of it's not 
'auto', @@ -208,7 +208,7 @@ void TableFunctionS3::parseArgumentsImpl(ASTs & args, const ContextPtr & context configuration.auth_settings.no_sign_request = no_sign_request; - LOG_TRACE(&Poco::Logger::get("TableFunctionS3"), " config format = {}", configuration.format); + LOG_TRACE(getLogger("TableFunctionS3"), " config format = {}", configuration.format); // if (configuration.format == "auto") // configuration.format = FormatFactory::instance().tryGetFormatFromFileName(Poco::URI(url).getPath()).value_or("auto"); diff --git a/src/Transaction/Actions/DDLAlterAction.h b/src/Transaction/Actions/DDLAlterAction.h index daff60ca22..7e941631a6 100644 --- a/src/Transaction/Actions/DDLAlterAction.h +++ b/src/Transaction/Actions/DDLAlterAction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -37,7 +38,7 @@ class DDLAlterAction : public IAction public: DDLAlterAction(const ContextPtr & query_context_, const TxnTimestamp & txn_id_, StoragePtr table_, const Settings & query_settings_, const String & query_id_) : IAction(query_context_, txn_id_), - log(&Poco::Logger::get("AlterAction")), + log(getLogger("AlterAction")), table(std::move(table_)), query_settings(query_settings_), params{table->getStorageID(), "fake_statement", false, ""}, @@ -47,7 +48,7 @@ public: DDLAlterAction(const ContextPtr & query_context_, const TxnTimestamp & txn_id_, AlterDatabaseActionParams params_, const Settings & query_settings_) : IAction(query_context_, txn_id_), - log(&Poco::Logger::get("AlterAction")), + log(getLogger("AlterAction")), query_settings(query_settings_), params(std::move(params_)) { @@ -74,7 +75,7 @@ private: // return if the DDL will change table schema. 
bool changeSchema() const; - Poco::Logger * log; + LoggerPtr log; const StoragePtr table; String old_schema; diff --git a/src/Transaction/Actions/DDLDropAction.h b/src/Transaction/Actions/DDLDropAction.h index 45eb800ead..e199211938 100644 --- a/src/Transaction/Actions/DDLDropAction.h +++ b/src/Transaction/Actions/DDLDropAction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -57,7 +58,7 @@ class DDLDropAction : public IAction { public: DDLDropAction(const ContextPtr & query_context_, const TxnTimestamp & txn_id_, DropActionParams params_) - : IAction(query_context_, txn_id_), params(std::move(params_)), log(&Poco::Logger::get("DropAction")) + : IAction(query_context_, txn_id_), params(std::move(params_)), log(getLogger("DropAction")) { } @@ -70,7 +71,7 @@ private: // void updateTsCache(const UUID & uuid, const TxnTimestamp & commit_time) override; DropActionParams params; - Poco::Logger * log; + LoggerPtr log; }; using DDLDropActionPtr = std::shared_ptr; diff --git a/src/Transaction/Actions/DropRangeAction.h b/src/Transaction/Actions/DropRangeAction.h index 2060b6c9be..f05c6becdc 100644 --- a/src/Transaction/Actions/DropRangeAction.h +++ b/src/Transaction/Actions/DropRangeAction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -31,7 +32,7 @@ public: IAction(query_context_, txn_id_), txn_record(std::move(record)), table(table_), - log(&Poco::Logger::get("DropRangeAction")) + log(getLogger("DropRangeAction")) {} ~DropRangeAction() override = default; @@ -52,7 +53,7 @@ public: private: TransactionRecord txn_record; const StoragePtr table; - Poco::Logger * log; + LoggerPtr log; MutableMergeTreeDataPartsCNCHVector parts; MutableMergeTreeDataPartsCNCHVector staged_parts; diff --git a/src/Transaction/Actions/InsertAction.h b/src/Transaction/Actions/InsertAction.h index 4ccc9e044c..9a19f389bf 100644 --- a/src/Transaction/Actions/InsertAction.h +++ b/src/Transaction/Actions/InsertAction.h @@ -15,6 +15,7 @@ #pragma once 
+#include #include #include #include @@ -77,7 +78,7 @@ private: CnchDedupHelper::DedupMode dedup_mode = CnchDedupHelper::DedupMode::APPEND; bool executed{false}; - Poco::Logger * log{&Poco::Logger::get("InsertAction")}; + LoggerPtr log{getLogger("InsertAction")}; UInt64 * block_id = nullptr; }; diff --git a/src/Transaction/Actions/MergeMutateAction.h b/src/Transaction/Actions/MergeMutateAction.h index a26130a421..e253317aa2 100644 --- a/src/Transaction/Actions/MergeMutateAction.h +++ b/src/Transaction/Actions/MergeMutateAction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -44,7 +45,7 @@ public: , source_part_names(source_part_names_) , manipulation_submit_time_ns(manipulation_submit_time_ns_) , peak_memory_usage(peak_memory_usage_) - , log(&Poco::Logger::get("MergeMutationAction")) + , log(getLogger("MergeMutationAction")) { } @@ -79,7 +80,7 @@ private: UInt64 manipulation_submit_time_ns; UInt64 peak_memory_usage; - Poco::Logger * log; + LoggerPtr log; MutableMergeTreeDataPartsCNCHVector parts; DeleteBitmapMetaPtrVector delete_bitmaps; diff --git a/src/Transaction/Actions/S3AttachMetaAction.h b/src/Transaction/Actions/S3AttachMetaAction.h index 2718d1cf84..3be31db5d2 100644 --- a/src/Transaction/Actions/S3AttachMetaAction.h +++ b/src/Transaction/Actions/S3AttachMetaAction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -80,7 +81,7 @@ private: bool executed{false}; - Poco::Logger * log{&Poco::Logger::get("S3AttachMetaAction")}; + LoggerPtr log{getLogger("S3AttachMetaAction")}; }; } diff --git a/src/Transaction/CnchExplicitTransaction.h b/src/Transaction/CnchExplicitTransaction.h index 54db81e650..99dc150781 100644 --- a/src/Transaction/CnchExplicitTransaction.h +++ b/src/Transaction/CnchExplicitTransaction.h @@ -14,6 +14,7 @@ */ #pragma once +#include #include #include #include @@ -30,7 +31,7 @@ namespace DB using Base = ICnchTransaction; private: - Poco::Logger * log 
{&Poco::Logger::get("CnchExplicitTransaction")}; + LoggerPtr log {getLogger("CnchExplicitTransaction")}; std::vector secondary_txns; std::vector statements; static constexpr int MAX_RETRY = 3; diff --git a/src/Transaction/CnchLock.cpp b/src/Transaction/CnchLock.cpp index 6d7f6993b5..7f67e8208b 100644 --- a/src/Transaction/CnchLock.cpp +++ b/src/Transaction/CnchLock.cpp @@ -47,7 +47,7 @@ public: auto client = getTargetServer(); LOG_DEBUG( - &Poco::Logger::get("CnchLockManagerClient"), + getLogger("CnchLockManagerClient"), "try lock {}, target server: {}", lock_info->toDebugString(), (client.has_value() ? (*client)->getRPCAddress() : "local")); if (!client) @@ -70,7 +70,7 @@ public: if (locked) { LOG_DEBUG( - &Poco::Logger::get("CnchLockManagerClient"), + getLogger("CnchLockManagerClient"), "unlock lock {}, target server: {}", lock_info->toDebugString(), (server_client ? server_client->getRPCAddress() : "local")); if (server_client) @@ -162,7 +162,7 @@ CnchLockHolder::~CnchLockHolder() bool CnchLockHolder::tryLock() { Stopwatch watch; - SCOPE_EXIT({ LOG_DEBUG(&Poco::Logger::get("CnchLock"), "acquire {} locks in {} ms", cnch_locks.size(), watch.elapsedMilliseconds()); }); + SCOPE_EXIT({ LOG_DEBUG(getLogger("CnchLock"), "acquire {} locks in {} ms", cnch_locks.size(), watch.elapsedMilliseconds()); }); /// Init heartbeat task if needed /// We need to start the heartbeat process in advance, otherwise txn may be aborted due to expiration time diff --git a/src/Transaction/CnchServerTransaction.h b/src/Transaction/CnchServerTransaction.h index e93fae11c3..949b9e5be7 100644 --- a/src/Transaction/CnchServerTransaction.h +++ b/src/Transaction/CnchServerTransaction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -65,7 +66,7 @@ public: void incrementModifiedCount(const Statistics::AutoStats::ModifiedCounter& new_counts); - Poco::Logger * getLogger() { return log; } + LoggerPtr getLogger() { return log; } protected: static constexpr size_t MAX_RETRY = 3; 
@@ -75,7 +76,7 @@ protected: private: - Poco::Logger * log {&Poco::Logger::get("CnchServerTransaction")}; + LoggerPtr log {::getLogger("CnchServerTransaction")}; std::atomic_bool dedup_stage_flag{false}; diff --git a/src/Transaction/CnchWorkerTransaction.h b/src/Transaction/CnchWorkerTransaction.h index 87a57fcb2e..4519729d31 100644 --- a/src/Transaction/CnchWorkerTransaction.h +++ b/src/Transaction/CnchWorkerTransaction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -93,7 +94,7 @@ private: CnchServerClientPtr server_client; StorageID kafka_table_id{StorageID::createEmpty()}; size_t kafka_consumer_index{SIZE_MAX}; - Poco::Logger * log {&Poco::Logger::get("CnchWorkerTransaction")}; + LoggerPtr log {getLogger("CnchWorkerTransaction")}; /// Transaction should only be committed explicitly bool enable_explicit_commit{false}; diff --git a/src/Transaction/GlobalTxnCommitter.h b/src/Transaction/GlobalTxnCommitter.h index b394c8e960..3138b38891 100644 --- a/src/Transaction/GlobalTxnCommitter.h +++ b/src/Transaction/GlobalTxnCommitter.h @@ -1,4 +1,5 @@ #pragma once +#include #include namespace DB @@ -19,7 +20,7 @@ private: std::mutex committers_mutex; std::map committers; - Poco::Logger * log{&Poco::Logger::get("GlobalTXNComitter")}; + LoggerPtr log{getLogger("GlobalTXNComitter")}; }; using GlobalTxnCommitterPtr = std::shared_ptr; diff --git a/src/Transaction/ICnchTransaction.h b/src/Transaction/ICnchTransaction.h index 2394aaf0b5..aa61b78957 100644 --- a/src/Transaction/ICnchTransaction.h +++ b/src/Transaction/ICnchTransaction.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -273,7 +274,7 @@ private: String creator; mutable bthread::RecursiveMutex mutex; - Poco::Logger * log{&Poco::Logger::get("ICnchTransaction")}; + LoggerPtr log{getLogger("ICnchTransaction")}; mutable std::mutex database_cache_mutex; std::map database_cache; }; diff --git a/src/Transaction/IntentLock.h b/src/Transaction/IntentLock.h index 
882d11e629..f31713cb76 100644 --- a/src/Transaction/IntentLock.h +++ b/src/Transaction/IntentLock.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -47,7 +48,7 @@ public: , txn_record(std::move(txn_record_)) , lock_prefix(lock_prefix_) , intent_names(std::move(intent_names_)) - , log(&Poco::Logger::get("IntentLock")) + , log(getLogger("IntentLock")) { } @@ -77,7 +78,7 @@ private: bool locked{false}; bool valid{true}; - Poco::Logger * log; + LoggerPtr log; void lockImpl(); void writeIntents(); diff --git a/src/Transaction/LockManager.h b/src/Transaction/LockManager.h index d38db51785..705f6f2dcd 100644 --- a/src/Transaction/LockManager.h +++ b/src/Transaction/LockManager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -132,7 +133,7 @@ private: ContextPtr global_context; BackgroundSchedulePool::TaskHolder txn_checker; - Poco::Logger * log{&Poco::Logger::get("CnchLockManager")}; + LoggerPtr log{getLogger("CnchLockManager")}; std::atomic is_stopped{false}; diff --git a/src/Transaction/TableTxnCommitter.h b/src/Transaction/TableTxnCommitter.h index e4e13987a4..818e12905f 100644 --- a/src/Transaction/TableTxnCommitter.h +++ b/src/Transaction/TableTxnCommitter.h @@ -1,4 +1,5 @@ #pragma once +#include #include @@ -12,7 +13,7 @@ public: explicit TableTxnCommitter(const ContextPtr & context_, const UUID & storage_uuid_) : context(context_), uuid(storage_uuid_), - log(&Poco::Logger::get("TnxComitter(" + UUIDHelpers::UUIDToString(uuid) + ")")) + log(getLogger("TnxComitter(" + UUIDHelpers::UUIDToString(uuid) + ")")) { } @@ -26,7 +27,7 @@ private: ContextPtr context; const UUID uuid; - Poco::Logger * log; + LoggerPtr log; std::atomic latest_version {0}; std::mutex commit_mutex; diff --git a/src/Transaction/TransactionCleaner.h b/src/Transaction/TransactionCleaner.h index 1987b3ecec..ef88712272 100644 --- a/src/Transaction/TransactionCleaner.h +++ b/src/Transaction/TransactionCleaner.h @@ -15,6 +15,7 @@ #pragma once +#include #include 
#include #include @@ -193,7 +194,7 @@ private: mutable std::mutex mutex; TxnCleanTasksMap clean_tasks; bool shutdown{false}; - Poco::Logger * log = &Poco::Logger::get("TransactionCleaner"); + LoggerPtr log = getLogger("TransactionCleaner"); }; using TransactionCleanerPtr = std::unique_ptr; diff --git a/src/Transaction/TransactionCommon.h b/src/Transaction/TransactionCommon.h index b82a0ce687..7a35477834 100644 --- a/src/Transaction/TransactionCommon.h +++ b/src/Transaction/TransactionCommon.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -144,7 +145,7 @@ struct UndoResource UInt64 id; Container pb_model; - Poco::Logger * log = &Poco::Logger::get("UndoResource"); + LoggerPtr log = getLogger("UndoResource"); /// pb_model.placeholders is a repeated field that hold resource info. Depending on the type, /// we have different interpretations of the resource info. Note that disk name is not a diff --git a/src/Transaction/TransactionCoordinatorRcCnch.h b/src/Transaction/TransactionCoordinatorRcCnch.h index 05f9b43141..37d66bb469 100644 --- a/src/Transaction/TransactionCoordinatorRcCnch.h +++ b/src/Transaction/TransactionCoordinatorRcCnch.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -79,7 +80,7 @@ public: getContext()->getConfigRef().getUInt("cnch_transaction_cleaner_dm_queue_size", 10000))) , finished_or_failed_txn_record_cache(std::make_unique(getContext()->getConfigRef().getUInt("size_of_cached_txn_records", 20000))) , scan_interval(getContext()->getConfigRef().getInt("cnch_transaction_list_scan_interval", 10 * 60 * 1000)) // default 10 mins - , log(&Poco::Logger::get("TransactionCoordinator")) + , log(getLogger("TransactionCoordinator")) { scan_active_txns_task = getContext()->getSchedulePool().createTask("ScanActiveTxnsTask", [this]() { scanActiveTransactions(); }); scan_active_txns_task->activate(); @@ -203,7 +204,7 @@ private: UInt64 scan_interval; BackgroundSchedulePool::TaskHolder scan_active_txns_task; - 
Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Transaction/tryAbortTransactionFromWorker.cpp b/src/Transaction/tryAbortTransactionFromWorker.cpp index c693bea2c1..90d8d1b448 100644 --- a/src/Transaction/tryAbortTransactionFromWorker.cpp +++ b/src/Transaction/tryAbortTransactionFromWorker.cpp @@ -25,7 +25,7 @@ namespace DB TransactionRecord tryAbortTransactionFromWorker(const Context & context, const TransactionCnchPtr & txn) { static constexpr size_t MAX_ABORT_RETRY = 3; - Poco::Logger * log = &Poco::Logger::get(__func__); + LoggerPtr log = getLogger(__func__); TransactionRecord cur_txn_record = txn->getTransactionRecord(); cur_txn_record.setStatus(CnchTransactionStatus::Running); diff --git a/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.cpp b/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.cpp index f52283dbb2..8bbc6f29f6 100644 --- a/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.cpp +++ b/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.cpp @@ -41,7 +41,7 @@ CloudUniqueMergeTreeMergeTask::CloudUniqueMergeTreeMergeTask( : ManipulationTask(std::move(params_), std::move(context_)) , storage(storage_) , log_name(storage.getLogName() + "(MergeTask)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) , cnch_writer(storage, getContext(), ManipulationType::Merge, getTaskID()) { if (params.source_data_parts.empty()) @@ -202,7 +202,7 @@ void CloudUniqueMergeTreeMergeTask::executeImpl() if (UInt64(time(nullptr) - last_touch_time) > getContext()->getSettingsRef().cloud_task_auto_stop_timeout) { LOG_TRACE( - &Poco::Logger::get("CloudUniqueMergeTreeMergeTask"), + getLogger("CloudUniqueMergeTreeMergeTask"), "Task {} doesn't receive heartbeat from server, stop it self.", params.task_id); setCancelled(); diff --git a/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.h b/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.h index 3fa2a47e8c..1b9161e1a5 100644 --- a/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.h +++ b/src/WorkerTasks/CloudUniqueMergeTreeMergeTask.h 
@@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -48,7 +49,7 @@ private: StorageCloudMergeTree & storage; String log_name; - Poco::Logger * log; + LoggerPtr log; CnchDataWriter cnch_writer; String partition_id; diff --git a/src/WorkerTasks/CnchMergePrefetcher.cpp b/src/WorkerTasks/CnchMergePrefetcher.cpp index 607fc6cd3b..f20ba6c523 100644 --- a/src/WorkerTasks/CnchMergePrefetcher.cpp +++ b/src/WorkerTasks/CnchMergePrefetcher.cpp @@ -35,7 +35,7 @@ void CnchMergePrefetcher::PartFutureFiles::schedulePrefetchTask(FutureSegment & try { Stopwatch stopwatch; - LOG_TRACE(&Poco::Logger::get("CnchMergePrefetcher"), "Stage {} copying to {}", stage, local_path); + LOG_TRACE(getLogger("CnchMergePrefetcher"), "Stage {} copying to {}", stage, local_path); std::unique_ptr in = part->volume->getDisk()->readFile( remote_rel_path, future_segment.prefetcher->read_settings); @@ -46,7 +46,7 @@ void CnchMergePrefetcher::PartFutureFiles::schedulePrefetchTask(FutureSegment & if (!cancel_flag.load(std::memory_order_relaxed)) { - LOG_TRACE(&Poco::Logger::get("CnchMergePrefetcher"), "Stage {} " + LOG_TRACE(getLogger("CnchMergePrefetcher"), "Stage {} " "copied to {}, elapsed {} ms.", stage, local_path, stopwatch.elapsedMilliseconds()); } @@ -110,13 +110,13 @@ void CnchMergePrefetcher::PartFutureFiles::releaseSegment(const String & stream_ future_segment->future_access -= 1; if (future_segment->future_access == 0) { - LOG_TRACE(&Poco::Logger::get("CnchMergePrefetcher"), "Removing {}", + LOG_TRACE(getLogger("CnchMergePrefetcher"), "Removing {}", future_segment->reservation->getDisk()->getPath() + future_segment->data_relative_path); future_segment->reservation->getDisk()->removeRecursive(future_segment->data_relative_path); } else if (future_segment->future_access < 0) { - LOG_WARNING(&Poco::Logger::get("CnchMergePrefetcher"), "FutureSegment access count < 0, this is a bug"); + LOG_WARNING(getLogger("CnchMergePrefetcher"), "FutureSegment access count < 0, this is a bug"); 
} } } @@ -141,7 +141,7 @@ void CnchMergePrefetcher::submitDataPart( if (merging_columns.empty()) throw Exception("Expect non-empty merging_columns", ErrorCodes::LOGICAL_ERROR); - auto* log = &Poco::Logger::get("CnchMergePrefetcher"); + auto log = getLogger("CnchMergePrefetcher"); auto* future_files = part_to_future_files.try_emplace(data_part->name, std::make_unique(*this, data_part->name)).first->second.get(); diff --git a/src/WorkerTasks/ManipulationTask.cpp b/src/WorkerTasks/ManipulationTask.cpp index 1492ce3ee6..5086c907e8 100644 --- a/src/WorkerTasks/ManipulationTask.cpp +++ b/src/WorkerTasks/ManipulationTask.cpp @@ -58,7 +58,7 @@ void ManipulationTask::execute() void executeManipulationTask(ManipulationTaskPtr task, MergeTreeMutableDataPartsVector all_parts) { - auto * log = &Poco::Logger::get(__func__); + auto log = getLogger(__func__); try { diff --git a/src/WorkerTasks/ManipulationTask.h b/src/WorkerTasks/ManipulationTask.h index dcc0dec964..21e6cc08e8 100644 --- a/src/WorkerTasks/ManipulationTask.h +++ b/src/WorkerTasks/ManipulationTask.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -49,7 +50,7 @@ public: if (static_cast(time(nullptr) - getManipulationListElement()->last_touch_time.load(std::memory_order_relaxed)) > timeout) { - LOG_TRACE(&Poco::Logger::get("ManipulationTask"), + LOG_TRACE(getLogger("ManipulationTask"), "Set is_cancelled for task {} as no heartbeat from server.", getManipulationListElement()->task_id); setCancelled(); return true; diff --git a/src/WorkerTasks/MergeTreeDataMerger.cpp b/src/WorkerTasks/MergeTreeDataMerger.cpp index 4df7a94fa2..8a2f5c0a74 100644 --- a/src/WorkerTasks/MergeTreeDataMerger.cpp +++ b/src/WorkerTasks/MergeTreeDataMerger.cpp @@ -120,7 +120,7 @@ MergeTreeDataMerger::MergeTreeDataMerger( , check_cancel(std::move(check_cancel_)) , build_rowid_mappings(build_rowid_mappings_) , rowid_mappings(params.source_data_parts.size()) - , log(&Poco::Logger::get(data.getLogName() + " (Merger)")) + , 
log(getLogger(data.getLogName() + " (Merger)")) { if (build_rowid_mappings && data.merging_params.mode != MergeTreeMetaBase::MergingParams::Ordinary) throw Exception( diff --git a/src/WorkerTasks/MergeTreeDataMerger.h b/src/WorkerTasks/MergeTreeDataMerger.h index 0483e1da78..af0a405899 100644 --- a/src/WorkerTasks/MergeTreeDataMerger.h +++ b/src/WorkerTasks/MergeTreeDataMerger.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -108,7 +109,7 @@ private: bool build_rowid_mappings; /// rowid mapping for each input part, only for normal parts std::vector rowid_mappings; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; ReservationPtr space_reservation; /// Used for building rowid mappings size_t output_rowid = 0; diff --git a/src/WorkerTasks/MergeTreeDataMutator.cpp b/src/WorkerTasks/MergeTreeDataMutator.cpp index cc454fa8b3..08b50683c4 100644 --- a/src/WorkerTasks/MergeTreeDataMutator.cpp +++ b/src/WorkerTasks/MergeTreeDataMutator.cpp @@ -80,7 +80,7 @@ static const double DISK_USAGE_COEFFICIENT_TO_RESERVE = 1.1; MergeTreeDataMutator::MergeTreeDataMutator(MergeTreeMetaBase & data_, size_t background_pool_size_) : data(data_) , background_pool_size(background_pool_size_) - , log(&Poco::Logger::get(data.getLogName() + " (CnchMutator)")) + , log(getLogger(data.getLogName() + " (CnchMutator)")) { } diff --git a/src/WorkerTasks/MergeTreeDataMutator.h b/src/WorkerTasks/MergeTreeDataMutator.h index 0975e1b01e..ad04d39628 100644 --- a/src/WorkerTasks/MergeTreeDataMutator.h +++ b/src/WorkerTasks/MergeTreeDataMutator.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -184,7 +185,7 @@ private: MergeTreeMetaBase & data; const size_t background_pool_size; - Poco::Logger * log; + LoggerPtr log; bool is_delete_command = false; }; diff --git a/src/WorkerTasks/MergeTreeDataReclusterMutator.cpp b/src/WorkerTasks/MergeTreeDataReclusterMutator.cpp index a0866881b0..b6d88ebdfb 100644 --- 
a/src/WorkerTasks/MergeTreeDataReclusterMutator.cpp +++ b/src/WorkerTasks/MergeTreeDataReclusterMutator.cpp @@ -30,7 +30,7 @@ namespace ErrorCodes MergeTreeDataReclusterMutator::MergeTreeDataReclusterMutator(MergeTreeMetaBase & data_) : data(data_) - , log(&Poco::Logger::get(data.getLogName() + " (CnchRecluster)")) + , log(getLogger(data.getLogName() + " (CnchRecluster)")) { } diff --git a/src/WorkerTasks/MergeTreeDataReclusterMutator.h b/src/WorkerTasks/MergeTreeDataReclusterMutator.h index 48708b079c..d9f256c348 100644 --- a/src/WorkerTasks/MergeTreeDataReclusterMutator.h +++ b/src/WorkerTasks/MergeTreeDataReclusterMutator.h @@ -15,6 +15,7 @@ #pragma once +#include #include #include #include @@ -48,7 +49,7 @@ private: MergeTreeMetaBase & data; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/WorkerTasks/StorageMaterializedViewRefreshTask.cpp b/src/WorkerTasks/StorageMaterializedViewRefreshTask.cpp index 81a27b7df7..9b29eb3e15 100644 --- a/src/WorkerTasks/StorageMaterializedViewRefreshTask.cpp +++ b/src/WorkerTasks/StorageMaterializedViewRefreshTask.cpp @@ -25,7 +25,7 @@ StorageMaterializedViewRefreshTask::StorageMaterializedViewRefreshTask( : ManipulationTask(std::move(params_), std::move(context_)) , storage(storage_), mv_storage_id(mv_storage_id_), server_client(client) { - LOG_DEBUG(&Poco::Logger::get("StorageMaterializedViewRefreshTask"), "construct StorageMaterializedViewRefreshTask for {}.", storage.getTableName()); + LOG_DEBUG(getLogger("StorageMaterializedViewRefreshTask"), "construct StorageMaterializedViewRefreshTask for {}.", storage.getTableName()); } void StorageMaterializedViewRefreshTask::refreshAsyncOnWorker(AsyncRefreshParam & mv_refresh_param, ContextMutablePtr insert_context) @@ -34,7 +34,7 @@ void StorageMaterializedViewRefreshTask::refreshAsyncOnWorker(AsyncRefreshParam throw Exception("refreshAsyncOnWorker should run in worker", ErrorCodes::LOGICAL_ERROR); // INSERT SELECT - 
LOG_DEBUG(&Poco::Logger::get("StorageMaterializedViewRefreshTask"), "materialized view refresh in worker insert select query: {}", mv_refresh_param.insert_select_query); + LOG_DEBUG(getLogger("StorageMaterializedViewRefreshTask"), "materialized view refresh in worker insert select query: {}", mv_refresh_param.insert_select_query); BlockIO insert_io; try { @@ -73,7 +73,7 @@ void StorageMaterializedViewRefreshTask::executeImpl() { // refreshAsyncOnWorker(*params.mv_refresh_param, const_cast(*getContext()).shared_from_this()); - LOG_DEBUG(&Poco::Logger::get("StorageMaterializedViewRefreshTask"), "insert select finished."); + LOG_DEBUG(getLogger("StorageMaterializedViewRefreshTask"), "insert select finished."); server_client->handleRefreshTaskOnFinish(mv_storage_id, params.task_id, params.txn_id); } diff --git a/src/mysqlxx/Pool.h b/src/mysqlxx/Pool.h index 9e44d8a316..796c774c51 100644 --- a/src/mysqlxx/Pool.h +++ b/src/mysqlxx/Pool.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include