mirror of https://github.com/ByConity/ByConity
Merge a59c30e410 into f3d144d5aa
This commit is contained in:
commit 13ae398470
@@ -92,5 +92,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>root_path_replace_me/config/cnch_config.xml</cnch_config>
 </yandex>
@@ -365,5 +365,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>root_path_replace_me/config/cnch_config.xml</cnch_config>
 </yandex>
@@ -17,6 +17,5 @@
 <tso_get_leader_info_interval_ms>0</tso_get_leader_info_interval_ms>
 <tso_max_retry_count>3</tso_max_retry_count>
 </tso_service>
-<cnch_config>root_path_replace_me/config/cnch_config.xml</cnch_config>
 </yandex>
@@ -438,5 +438,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>root_path_replace_me/config/cnch_config.xml</cnch_config>
 </yandex>
@@ -438,5 +438,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>root_path_replace_me/config/cnch_config.xml</cnch_config>
 </yandex>
@@ -438,5 +438,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>root_path_replace_me/config/cnch_config.xml</cnch_config>
 </yandex>
@@ -438,5 +438,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>root_path_replace_me/config/cnch_config.xml</cnch_config>
 </yandex>
@@ -31,27 +31,26 @@ description: |
 overrides:
   deb:
     depends:
      - foundationdb-clients
   rpm:
     depends:
      - foundationdb-clients
 
 contents:
   - src: root/usr/bin/clickhouse
     dst: /usr/bin/clickhouse
-  - src: root/etc/byconity-server/cnch_config.xml
-    dst: /etc/byconity-server/cnch_config.xml
-    type: config|noreplace
+  - src: root/etc/byconity-server/conf.d
+    dst: /etc/byconity-server/conf.d
+    type: config|noreplace
   - src: fdb.cluster
     dst: /etc/byconity-server/fdb.cluster
     type: config|noreplace
   # docs
   - src: ../AUTHORS
     dst: /usr/share/doc/byconity-common-static/AUTHORS
   - src: ../CHANGELOG.md
     dst: /usr/share/doc/byconity-common-static/CHANGELOG.md
   - src: ../LICENSE
     dst: /usr/share/doc/byconity-common-static/LICENSE
   - src: ../README.md
     dst: /usr/share/doc/byconity-common-static/README.md
@@ -374,13 +374,13 @@ private:
 query_id_formats.emplace_back("Query id:", " {query_id}\n");
 #if USE_HDFS
 /// Init HDFS3 client config path
-std::string hdfs_config = context->getCnchConfigRef().getString("hdfs3_config", "");
+std::string hdfs_config = context->getConfigRef().getString("hdfs3_config", "");
 if (!hdfs_config.empty())
 {
 setenv("LIBHDFS3_CONF", hdfs_config.c_str(), 1);
 }
 
-HDFSConnectionParams hdfs_params = HDFSConnectionParams::parseHdfsFromConfig(context->getCnchConfigRef());
+HDFSConnectionParams hdfs_params = HDFSConnectionParams::parseHdfsFromConfig(context->getConfigRef());
 context->setHdfsConnectionParams(hdfs_params);
 #endif
 auto vetos_params = VETosConnectionParams::parseVeTosFromConfig(config());
@@ -30,11 +30,12 @@ set (CLICKHOUSE_SERVER_LINK
 
 clickhouse_program_add(server)
 
-install(FILES byconity-server.xml byconity-worker.xml byconity-worker-write.xml cnch_config.xml byconity-daemon-manager.xml byconity-tso.xml users.xml byconity-resource-manager.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/byconity-server" COMPONENT clickhouse)
+install(FILES byconity-server.xml byconity-worker.xml byconity-worker-write.xml byconity-daemon-manager.xml byconity-tso.xml users.xml byconity-resource-manager.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/byconity-server" COMPONENT clickhouse)
+install(DIRECTORY conf.d DESTINATION "${CLICKHOUSE_ETC_DIR}/byconity-server" COMPONENT clickhouse)
 
 clickhouse_embed_binaries(
 TARGET clickhouse_server_configs
-RESOURCES byconity-server.xml byconity-worker.xml byconity-worker-write.xml cnch_config.xml byconity-daemon-manager.xml byconity-tso.xml users.xml embedded.xml play.html
+RESOURCES byconity-server.xml byconity-worker.xml byconity-worker-write.xml conf.d/cnch_config.xml byconity-daemon-manager.xml byconity-tso.xml users.xml embedded.xml play.html
 )
 add_dependencies(clickhouse-server-lib clickhouse_server_configs)
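
Note on the conf.d move (not part of the diff itself): the server's config loader, ConfigProcessor, behaves as in upstream ClickHouse and merges every *.xml found in a conf.d directory next to the main config into one configuration tree, which is what getConfigRef() exposes. A minimal sketch of that load path, using the same ConfigProcessor/loadConfig() calls that appear in the hunks below; the install path and the DB namespace are assumptions, not taken from this commit:

// Sketch only: why installing cnch_config.xml under conf.d makes its keys
// visible through the ordinary merged configuration.
#include <Common/Config/ConfigProcessor.h>          // assumed include path
#include <Poco/Util/AbstractConfiguration.h>
#include <iostream>

int main()
{
    // Loads byconity-server.xml and merges *.xml from the sibling conf.d/ directory,
    // now including cnch_config.xml.
    DB::ConfigProcessor processor("/etc/byconity-server/byconity-server.xml");  // path is an assumption
    auto loaded = processor.loadConfig();
    const Poco::Util::AbstractConfiguration & config = *loaded.configuration;

    // A key that previously lived only in the separately loaded cnch_config.xml:
    std::cout << config.getString("service_discovery.mode", "<not set>") << "\n";
    return 0;
}
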
@@ -649,7 +649,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
 global_context->addRestrictSettingsToWhitelist(setting_names);
 }
 
-global_context->initCnchConfig(config());
 const UInt64 memory_amount = getMemoryAmount();
 
 global_context->setBlockPrivilegedOp(config().getBool("restrict_tenanted_users_to_privileged_operations", false));
@@ -699,7 +698,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
 if (global_context->getServerType() == ServerType::cnch_server || global_context->getServerType() == ServerType::cnch_worker)
 global_context->setComplexQueryActive(true);
 
-MetastoreConfig catalog_conf(global_context->getCnchConfigRef(), CATALOG_SERVICE_CONFIGURE);
+MetastoreConfig catalog_conf(global_context->getConfigRef(), CATALOG_SERVICE_CONFIGURE);
 
 std::string current_raw_sd_config;
 if (config().has("service_discovery")) // only important for local mode (for observing if the sd section is changed)
@@ -1357,19 +1356,19 @@ int Server::main(const std::vector<std::string> & /*args*/)
 
 #if USE_HDFS
 /// Init hdfs user
-std::string hdfs_user = global_context->getCnchConfigRef().getString("hdfs_user", "clickhouse");
+std::string hdfs_user = global_context->getConfigRef().getString("hdfs_user", "clickhouse");
 global_context->setHdfsUser(hdfs_user);
-std::string hdfs_nnproxy = global_context->getCnchConfigRef().getString("hdfs_nnproxy", "nnproxy");
+std::string hdfs_nnproxy = global_context->getConfigRef().getString("hdfs_nnproxy", "nnproxy");
 global_context->setHdfsNNProxy(hdfs_nnproxy);
 
 /// Init HDFS3 client config path
-std::string hdfs_config = global_context->getCnchConfigRef().getString("hdfs3_config", "");
+std::string hdfs_config = global_context->getConfigRef().getString("hdfs3_config", "");
 if (!hdfs_config.empty())
 {
 setenv("LIBHDFS3_CONF", hdfs_config.c_str(), 1);
 }
 
-HDFSConnectionParams hdfs_params = HDFSConnectionParams::parseHdfsFromConfig(global_context->getCnchConfigRef());
+HDFSConnectionParams hdfs_params = HDFSConnectionParams::parseHdfsFromConfig(global_context->getConfigRef());
 global_context->setHdfsConnectionParams(hdfs_params);
 
 // pre lookup and cache consult result to avoid the overhead of lookupNNProxy
@@ -1438,9 +1437,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
 
 if( has_hdfs_disk )
 {
-const int hdfs_max_fd_num = global_context->getCnchConfigRef().getInt("hdfs_max_fd_num", 100000);
-const int hdfs_skip_fd_num = global_context->getCnchConfigRef().getInt("hdfs_skip_fd_num", 100);
-const int hdfs_io_error_num_to_reconnect = global_context->getCnchConfigRef().getInt("hdfs_io_error_num_to_reconnect", 10);
+const int hdfs_max_fd_num = global_context->getConfigRef().getInt("hdfs_max_fd_num", 100000);
+const int hdfs_skip_fd_num = global_context->getConfigRef().getInt("hdfs_skip_fd_num", 100);
+const int hdfs_io_error_num_to_reconnect = global_context->getConfigRef().getInt("hdfs_io_error_num_to_reconnect", 10);
 registerDefaultHdfsFileSystem(hdfs_params, hdfs_max_fd_num, hdfs_skip_fd_num, hdfs_io_error_num_to_reconnect);
 }
@@ -1499,9 +1498,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
 {
 // WARNING: There is a undesired restriction on FDB. Each process could only init one fdb client otherwise it will panic.
 // so if we use fdb as the kv storage, the config for external and internal catalog must be the same.
-if (global_context->getCnchConfigRef().has(ExternalCatalog::Mgr::configPrefix()))
+if (global_context->getConfigRef().has(ExternalCatalog::Mgr::configPrefix()))
 {
-ExternalCatalog::Mgr::init(*global_context, global_context->getCnchConfigRef());
+ExternalCatalog::Mgr::init(*global_context, global_context->getConfigRef());
 }
 }
 /// Check sanity of MergeTreeSettings on server startup
@@ -90,5 +90,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>/etc/byconity-server/cnch_config.xml</cnch_config>
 </yandex>
@@ -36,6 +36,4 @@
 </vw>
 </vws>
 </resource_manager>
-
-<cnch_config>/etc/byconity-server/cnch_config.xml</cnch_config>
 </yandex>
@@ -163,6 +163,5 @@
 <num_probes>21</num_probes>
 <load_factor>1.3</load_factor>
 </consistent_hash_ring>
-<cnch_config>/etc/byconity-server/cnch_config.xml</cnch_config>
 <custom_settings_prefixes>SQL_</custom_settings_prefixes>
 </yandex>
@@ -17,6 +17,5 @@
 <tso_get_leader_info_interval_ms>0</tso_get_leader_info_interval_ms>
 <tso_max_retry_count>3</tso_max_retry_count>
 </tso_service>
-<cnch_config>/etc/byconity-server/cnch_config.xml</cnch_config>
 </yandex>
@@ -422,5 +422,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-<cnch_config>/etc/byconity-server/cnch_config.xml</cnch_config>
 </yandex>
@@ -166,6 +166,4 @@
 </cnch_default_hdfs>
 </policies>
 </storage_configuration>
-
-<cnch_config>/etc/byconity-server/cnch_config.xml</cnch_config>
 </yandex>
@@ -0,0 +1,112 @@
+<?xml version="1.0"?>
+<yandex>
+    <service_discovery>
+        <mode>local</mode>
+        <cluster>default</cluster>
+        <disable_cache>false</disable_cache>
+        <cache_timeout>5</cache_timeout>
+        <server>
+            <psm>data.cnch.server</psm>
+            <service>cnch-server</service>
+            <headless_service>cnch-server-headless</headless_service>
+
+            <node>
+                <host>{your_server_address}</host>
+                <hostname>Server-6</hostname>
+                <ports>
+                    <port>
+                        <!--HTTP-->
+                        <name>PORT2</name>
+                        <value>8123</value>
+                    </port>
+                    <port>
+                        <!--RPC-->
+                        <name>PORT1</name>
+                        <value>8124</value>
+                    </port>
+                    <port>
+                        <!--TCP/Primary-->
+                        <name>PORT0</name>
+                        <value>9010</value>
+                    </port>
+                    <port>
+                        <!--Exchange Stream Port-->
+                        <name>PORT5</name>
+                        <value>9300</value>
+                    </port>
+                    <port>
+                        <!--Exchange Status Port-->
+                        <name>PORT6</name>
+                        <value>9400</value>
+                    </port>
+                </ports>
+            </node>
+        </server>
+        <tso>
+            <psm>data.cnch.tso</psm>
+
+            <service>cnch-tso</service>
+            <headless_service>cnch-tso-headless</headless_service>
+
+            <node>
+                <host>{your_tso_address}</host>
+                <hostname>TSO-0</hostname>
+                <ports>
+                    <port>
+                        <name>PORT0</name>
+                        <value>9910</value>
+                    </port>
+                </ports>
+            </node>
+        </tso>
+        <daemon_manager>
+            <psm>data.cnch.daemon_manager</psm>
+
+            <service>cnch-daemonmanager</service>
+            <headless_service>cnch-daemonmanager-headless</headless_service>
+
+            <node>
+                <host>{your_daemonmanager_address}</host>
+                <hostname>Daemon-1</hostname>
+                <ports>
+                    <port>
+                        <!--RPC-->
+                        <name>PORT0</name>
+                        <value>9920</value>
+                    </port>
+                </ports>
+            </node>
+        </daemon_manager>
+        <resource_manager>
+            <psm>data.cnch.resource_manager</psm>
+            <service>cnch-resourcemanager</service>
+            <headless_service>cnch-resourcemanager-headless</headless_service>
+            <!-- For local test -->
+            <node>
+                <host>{your_resource_manager_address}</host>
+                <hostname>rm0</hostname>
+                <ports>
+                    <!-- RPC/Primary -->
+                    <port><name>PORT0</name><value>9925</value></port>
+                </ports>
+            </node>
+        </resource_manager>
+    </service_discovery>
+
+    <service_discovery_kv>
+        <election_prefix>your_customized_name_</election_prefix>
+    </service_discovery_kv>
+
+    <catalog>
+        <name_space>catalog_namespace_uniq_string</name_space>
+    </catalog>
+
+    <catalog_service>
+        <type>fdb</type>
+        <fdb>
+            <cluster_file>/etc/byconity-server/fdb.cluster</cluster_file>
+        </fdb>
+    </catalog_service>
+
+    <hdfs_nnproxy>hdfs://localhost:12001</hdfs_nnproxy>
+</yandex>
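
For reference, a minimal sketch of reading a service-discovery entry from the file above with plain Poco; the root <yandex> element is stripped by the loader, so keys are dotted paths beneath it, and repeated <port> elements are addressed by index. The on-disk path is the packaged location and an assumption here:

#include <Poco/Util/XMLConfiguration.h>
#include <Poco/AutoPtr.h>
#include <iostream>

int main()
{
    Poco::AutoPtr<Poco::Util::XMLConfiguration> cfg(
        new Poco::Util::XMLConfiguration("/etc/byconity-server/conf.d/cnch_config.xml"));  // assumed path

    std::cout << cfg->getString("service_discovery.mode") << "\n";              // "local"
    std::cout << cfg->getString("service_discovery.server.node.host") << "\n";  // placeholder address
    // Third <port> under the server node, i.e. PORT0 (TCP/Primary):
    std::cout << cfg->getInt("service_discovery.server.node.ports.port[2].value") << "\n";  // 9010
    return 0;
}
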
@@ -25,15 +25,8 @@ void RootConfiguration::loadFromPocoConfigImpl(const PocoAbstractConfig & config
 {
 // resource_manager.loadFromPocoConfig(config, "rm_service");
 resource_manager.loadFromPocoConfig(config, "resource_manager");
-
-// load service discovery from cnch_config
-ConfigurationPtr service_discovery_config;
-const auto service_discovery_config_path = config.getString("cnch_config");
-ConfigProcessor config_processor(service_discovery_config_path);
-const auto loaded_config = config_processor.loadConfig();
-service_discovery_config = loaded_config.configuration;
-service_discovery.loadFromPocoConfig(*service_discovery_config, "service_discovery");
-service_discovery_kv.loadFromPocoConfig(*service_discovery_config, "service_discovery_kv");
+service_discovery.loadFromPocoConfig(config, "service_discovery");
+service_discovery_kv.loadFromPocoConfig(config, "service_discovery_kv");
 queue_manager.loadFromPocoConfig(config, "queue_manager");
 tso_service.loadFromPocoConfig(config, "tso_service");
 bulk_synchronous_parallel.loadFromPocoConfig(config, "bulk_synchronous_parallel");
@@ -352,10 +352,7 @@ int DaemonManager::main(const std::vector<std::string> &)
 global_context->setSetting("background_schedule_pool_size", config().getUInt64("background_schedule_pool_size", 12));
 GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 100));
 
-global_context->initCnchConfig(config());
-
-const Poco::Util::AbstractConfiguration & cnch_config = global_context->getCnchConfigRef();
-MetastoreConfig catalog_conf(cnch_config, CATALOG_SERVICE_CONFIGURE);
+MetastoreConfig catalog_conf(config(), CATALOG_SERVICE_CONFIGURE);
 global_context->initCatalog(catalog_conf, config().getString("catalog.name_space", "default"), config().getBool("enable_cnch_write_remote_catalog", true));
 global_context->initServiceDiscoveryClient();
 global_context->initCnchServerClientPool(config().getString("service_discovery.server.psm", "data.cnch.server"));
@@ -320,7 +320,6 @@ struct ContextSharedPart
 String dictionaries_lib_path; /// Path to the directory with user provided binaries and libraries for external dictionaries.
 String metastore_path; /// Path to metastore. We use a seperate path to hold all metastore to make it more easier to manage the metadata on server.
 ConfigurationPtr config; /// Global configuration settings.
-ConfigurationPtr cnch_config; /// Config used in cnch.
 RootConfiguration root_config; /// Predefined global configuration settings.
 
 String tmp_path; /// Path to the temporary files that occur when processing the request.
@@ -1439,24 +1438,6 @@ void Context::initRootConfig(const Poco::Util::AbstractConfiguration & config)
 shared->root_config.loadFromPocoConfig(config, "");
 }
 
-void Context::initCnchConfig(const Poco::Util::AbstractConfiguration & config)
-{
-if (config.has("cnch_config"))
-{
-const auto cnch_config_path = config.getString("cnch_config");
-ConfigProcessor config_processor(cnch_config_path);
-const auto loaded_config = config_processor.loadConfig();
-shared->cnch_config = loaded_config.configuration;
-}
-else
-throw Exception("cnch_config not found", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
-}
-
-const Poco::Util::AbstractConfiguration & Context::getCnchConfigRef() const
-{
-return shared->cnch_config ? *shared->cnch_config : getConfigRef();
-}
-
 void Context::updateRootConfig(std::function<void (RootConfiguration &)> update_callback)
 {
 update_callback(shared->root_config);
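
The removed accessor already fell back to the merged server config when no separate cnch_config had been loaded, so switching call sites from getCnchConfigRef() to getConfigRef() preserves behaviour as long as the cnch keys reach the merged config (which the conf.d packaging above arranges). A compressed restatement of that old contract, using hypothetical names rather than project code:

#include <Poco/Util/AbstractConfiguration.h>

// Hypothetical helper mirroring the deleted getCnchConfigRef() logic:
// prefer a separately loaded cnch config if present, otherwise the merged config.
const Poco::Util::AbstractConfiguration & cnchConfigOrFallback(
    const Poco::Util::AbstractConfiguration * separate_cnch_config,
    const Poco::Util::AbstractConfiguration & merged_config)
{
    return separate_cnch_config ? *separate_cnch_config : merged_config;
}
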
@@ -5203,8 +5184,8 @@ void Context::updateQueueManagerConfig() const
 
 void Context::initServiceDiscoveryClient()
 {
-const auto & cnch_config = getCnchConfigRef();
-shared->sd = ServiceDiscoveryFactory::instance().create(cnch_config);
+const auto & config = getConfigRef();
+shared->sd = ServiceDiscoveryFactory::instance().create(config);
 }
 
 ServiceDiscoveryClientPtr Context::getServiceDiscoveryClient() const
@@ -753,9 +753,6 @@ public:
 const RootConfiguration & getRootConfig() const;
 void reloadRootConfig(const Poco::Util::AbstractConfiguration & poco_config);
 
-void initCnchConfig(const Poco::Util::AbstractConfiguration & poco_config);
-const Poco::Util::AbstractConfiguration & getCnchConfigRef() const;
-
 AccessControlManager & getAccessControlManager();
 const AccessControlManager & getAccessControlManager() const;
@@ -115,14 +115,13 @@ int ResourceManager::main(const std::vector<std::string> &)
 global_context->makeGlobalContext();
 global_context->setServerType(config().getString("cnch_type", "resource_manager"));
 global_context->setApplicationType(Context::ApplicationType::SERVER);
-global_context->initCnchConfig(config());
 global_context->initRootConfig(config());
 
 global_context->initServiceDiscoveryClient();
 
 /// Initialize catalog
-MetastoreConfig catalog_conf(global_context->getCnchConfigRef(), CATALOG_SERVICE_CONFIGURE);
-auto name_space = global_context->getCnchConfigRef().getString("catalog.name_space", "default");
+MetastoreConfig catalog_conf(global_context->getConfigRef(), CATALOG_SERVICE_CONFIGURE);
+auto name_space = global_context->getConfigRef().getString("catalog.name_space", "default");
 global_context->initCatalog(catalog_conf, name_space, config().getBool("enable_cnch_write_remote_catalog", true));
 global_context->initTSOClientPool(config().getString("service_discovery.tso.psm", "data.cnch.tso"));
 global_context->initTSOElectionReader();
@@ -286,7 +286,7 @@ void TSOServer::initLeaderElection()
 {
 LOG_DEBUG(log, "Enter leader election");
 
-auto election_metastore = Catalog::getMetastorePtr(MetastoreConfig{global_context->getCnchConfigRef(), CATALOG_SERVICE_CONFIGURE});
+auto election_metastore = Catalog::getMetastorePtr(MetastoreConfig{global_context->getConfigRef(), CATALOG_SERVICE_CONFIGURE});
 
 auto prefix = global_context->getRootConfig().service_discovery_kv.election_prefix.value;
 leader_election = std::make_unique<StorageElector>(
@@ -326,7 +326,6 @@ int TSOServer::main(const std::vector<std::string> &)
 global_context = Context::createGlobal(shared_context.get());
 
 global_context->makeGlobalContext();
-global_context->initCnchConfig(config());
 global_context->initRootConfig(config());
 global_context->initServiceDiscoveryClient();
 global_context->setApplicationType(Context::ApplicationType::TSO);