diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0f895c7c482..cc1ec835a7b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,8 @@
* Now replicas that are processing the `ALTER TABLE ATTACH PART[ITION]` command search in their `detached/` folders before fetching the data from other replicas. As an implementation detail, a new command `ATTACH_PART` is introduced in the replicated log. Parts are searched and compared by their checksums. [#18978](https://github.com/ClickHouse/ClickHouse/pull/18978) ([Mike Kot](https://github.com/myrrc)). **Note**:
* `ATTACH PART[ITION]` queries may not work during cluster upgrade.
* It's not possible to roll back to an older ClickHouse version after executing an `ALTER ... ATTACH` query in the new version, as the old servers would fail to pass the `ATTACH_PART` entry in the replicated log.
+* In this version, an empty `<remote_url_allow_hosts>` element blocks all access to remote hosts, while in previous versions it did nothing. If you want to keep the old behaviour and you have an empty `remote_url_allow_hosts` element in the configuration file, remove it. [#20058](https://github.com/ClickHouse/ClickHouse/pull/20058) ([Vladimir Chebotarev](https://github.com/excitoon)).
+
#### New Feature
@@ -132,7 +134,6 @@
* Fix receive and send timeouts and non-blocking read in secure socket. [#21429](https://github.com/ClickHouse/ClickHouse/pull/21429) ([Kruglov Pavel](https://github.com/Avogar)).
* `force_drop_table` flag didn't work for `MATERIALIZED VIEW`, it's fixed. Fixes [#18943](https://github.com/ClickHouse/ClickHouse/issues/18943). [#20626](https://github.com/ClickHouse/ClickHouse/pull/20626) ([tavplubix](https://github.com/tavplubix)).
* Fix name clashes in `PredicateRewriteVisitor`. It caused incorrect `WHERE` filtration after full join. Close [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
-* Fixed open behavior of remote host filter in case when there is `remote_url_allow_hosts` section in configuration but no entries there. [#20058](https://github.com/ClickHouse/ClickHouse/pull/20058) ([Vladimir Chebotarev](https://github.com/excitoon)).
#### Build/Testing/Packaging Improvement
diff --git a/src/Common/BorrowedObjectPool.h b/base/common/BorrowedObjectPool.h
similarity index 99%
rename from src/Common/BorrowedObjectPool.h
rename to base/common/BorrowedObjectPool.h
index d5263cf92a8..6a90a7e7122 100644
--- a/src/Common/BorrowedObjectPool.h
+++ b/base/common/BorrowedObjectPool.h
@@ -7,8 +7,7 @@
#include
#include
-
-#include
+#include
/** A limited-size pool of objects that cannot be used from different threads simultaneously.
* The main use case is to have a fixed number of objects that can be reused in different threads during their lifetime
diff --git a/src/Common/MoveOrCopyIfThrow.h b/base/common/MoveOrCopyIfThrow.h
similarity index 100%
rename from src/Common/MoveOrCopyIfThrow.h
rename to base/common/MoveOrCopyIfThrow.h
diff --git a/contrib/NuRaft b/contrib/NuRaft
index c35819f2c8a..d2feb5978b9 160000
--- a/contrib/NuRaft
+++ b/contrib/NuRaft
@@ -1 +1 @@
-Subproject commit c35819f2c8a378d4ba88cc930c17bc20aeb875eb
+Subproject commit d2feb5978b979729a07c3ca76eaa4ab94cef4ceb
diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh
index 14c6ee0d337..a42cb25f6f0 100755
--- a/docker/test/fasttest/run.sh
+++ b/docker/test/fasttest/run.sh
@@ -366,6 +366,9 @@ function run_tests
# JSON functions
01666_blns
+
+ # Depends on AWS
+ 01801_s3_cluster
)
(time clickhouse-test --hung-check -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 ||:) | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
diff --git a/docker/test/stress/stress b/docker/test/stress/stress
index 25a705ecbd1..4fbedceb0b8 100755
--- a/docker/test/stress/stress
+++ b/docker/test/stress/stress
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from multiprocessing import cpu_count
-from subprocess import Popen, call, STDOUT
+from subprocess import Popen, call, check_output, STDOUT
import os
import sys
import shutil
@@ -85,10 +85,27 @@ def prepare_for_hung_check():
# Issue #21004, live views are experimental, so let's just suppress it
call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT)
- # Wait for last queries to finish if any, not longer than 120 seconds
+ # Kill other queries that are known to be slow
+ # It's a query from 01232_preparing_sets_race_condition_long; it may take up to 1000 seconds in slow builds
+ call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """, shell=True, stderr=STDOUT)
+ # Long query from 00084_external_aggregation
+ call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """, shell=True, stderr=STDOUT)
+
+ # Wait for last queries to finish if any, not longer than 300 seconds
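+ # The subquery computes how many seconds remain until every currently running query reaches 300 seconds of elapsed time; sleepEachRow() spreads that wait over 300 rows.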
call("""clickhouse client -q "select sleepEachRow((
- select maxOrDefault(120 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 120
- ) / 120) from numbers(120) format Null" """, shell=True, stderr=STDOUT)
+ select maxOrDefault(300 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 300
+ ) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT)
+
+ # Even if all clickhouse-test processes are finished, there are probably some sh scripts,
+ # which still run some new queries. Let's ignore them.
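+ # Return False when no query has been running for more than 300 seconds; the caller then ignores a failed hung check.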
+ try:
+ query = """clickhouse client -q "SELECT count() FROM system.processes where where elapsed > 300" """
+ output = check_output(query, shell=True, stderr=STDOUT).decode('utf-8').strip()
+ if int(output) == 0:
+ return False
+ except:
+ pass
+ return True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
@@ -119,12 +136,12 @@ if __name__ == "__main__":
logging.info("All processes finished")
if args.hung_check:
- prepare_for_hung_check()
+ have_long_running_queries = prepare_for_hung_check()
logging.info("Checking if some queries hung")
cmd = "{} {} {}".format(args.test_cmd, "--hung-check", "00001_select_1")
res = call(cmd, shell=True, stderr=STDOUT)
hung_check_status = "No queries hung\tOK\n"
- if res != 0:
+ if res != 0 and have_long_running_queries:
logging.info("Hung check failed with exit code {}".format(res))
hung_check_status = "Hung check failed\tFAIL\n"
open(os.path.join(args.output_folder, "test_results.tsv"), 'w+').write(hung_check_status)
diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md
index ad5bebb3dea..4474b764d2e 100644
--- a/docs/en/engines/table-engines/integrations/postgresql.md
+++ b/docs/en/engines/table-engines/integrations/postgresql.md
@@ -94,10 +94,10 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> SELECT * FROM test;
- int_id | int_nullable | float | str | float_nullable
---------+--------------+-------+------+----------------
- 1 | | 2 | test |
-(1 row)
+ int_id | int_nullable | float | str | float_nullable
+ --------+--------------+-------+------+----------------
+ 1 | | 2 | test |
+ (1 row)
```
Table in ClickHouse, retrieving data from the PostgreSQL table created above:
diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md
index 7ec400949e9..4875532605e 100644
--- a/docs/en/sql-reference/functions/bitmap-functions.md
+++ b/docs/en/sql-reference/functions/bitmap-functions.md
@@ -33,7 +33,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res);
``` text
┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐
-│ │ AggregateFunction(groupBitmap, UInt8) │
+│ │ AggregateFunction(groupBitmap, UInt8) │
└─────┴──────────────────────────────────────────────┘
```
diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md
index c60067b06af..0ea4cfd6fbe 100644
--- a/docs/en/sql-reference/functions/hash-functions.md
+++ b/docs/en/sql-reference/functions/hash-functions.md
@@ -437,13 +437,13 @@ A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) data type has
**Example**
``` sql
-SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type;
+SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type;
```
``` text
-┌─MurmurHash3──────┬─type────────────┐
-│ 6�1�4"S5KT�~~q │ FixedString(16) │
-└──────────────────┴─────────────────┘
+┌─MurmurHash3──────────────────────┬─type───┐
+│ 368A1A311CB7342253354B548E7E7E71 │ String │
+└──────────────────────────────────┴────────┘
```
## xxHash32, xxHash64 {#hash-functions-xxhash32}
diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md
index bfb5fdf9be6..3eab572ac12 100644
--- a/docs/en/sql-reference/table-functions/postgresql.md
+++ b/docs/en/sql-reference/table-functions/postgresql.md
@@ -65,9 +65,9 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> SELECT * FROM test;
- int_id | int_nullable | float | str | float_nullable
---------+--------------+-------+------+----------------
- 1 | | 2 | test |
+ int_id | int_nullable | float | str | float_nullable
+ --------+--------------+-------+------+----------------
+ 1 | | 2 | test |
(1 row)
```
diff --git a/docs/ja/sql-reference/functions/bitmap-functions.md b/docs/ja/sql-reference/functions/bitmap-functions.md
index cc57e762610..de3ce938444 100644
--- a/docs/ja/sql-reference/functions/bitmap-functions.md
+++ b/docs/ja/sql-reference/functions/bitmap-functions.md
@@ -35,7 +35,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res)
``` text
┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐
-│ │ AggregateFunction(groupBitmap, UInt8) │
+│ │ AggregateFunction(groupBitmap, UInt8) │
└─────┴──────────────────────────────────────────────┘
```
diff --git a/docs/ja/sql-reference/functions/hash-functions.md b/docs/ja/sql-reference/functions/hash-functions.md
index d48e6846bb4..a98ae60690d 100644
--- a/docs/ja/sql-reference/functions/hash-functions.md
+++ b/docs/ja/sql-reference/functions/hash-functions.md
@@ -434,13 +434,13 @@ A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) データ型
**例**
``` sql
-SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type
+SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type;
```
``` text
-┌─MurmurHash3──────┬─type────────────┐
-│ 6�1�4"S5KT�~~q │ FixedString(16) │
-└──────────────────┴─────────────────┘
+┌─MurmurHash3──────────────────────┬─type───┐
+│ 368A1A311CB7342253354B548E7E7E71 │ String │
+└──────────────────────────────────┴────────┘
```
## xxHash32,xxHash64 {#hash-functions-xxhash32}
diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md
index 064665d49c1..cb8e38ae5c9 100644
--- a/docs/ru/engines/table-engines/integrations/postgresql.md
+++ b/docs/ru/engines/table-engines/integrations/postgresql.md
@@ -94,10 +94,10 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> SELECT * FROM test;
- int_id | int_nullable | float | str | float_nullable
---------+--------------+-------+------+----------------
- 1 | | 2 | test |
-(1 row)
+ int_id | int_nullable | float | str | float_nullable
+ --------+--------------+-------+------+----------------
+ 1 | | 2 | test |
+ (1 row)
```
Таблица в ClickHouse, получение данных из PostgreSQL таблицы, созданной выше:
diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md
index ddae2f3eb40..3da729664d0 100644
--- a/docs/ru/sql-reference/functions/bitmap-functions.md
+++ b/docs/ru/sql-reference/functions/bitmap-functions.md
@@ -25,7 +25,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res);
``` text
┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐
-│ │ AggregateFunction(groupBitmap, UInt8) │
+│ │ AggregateFunction(groupBitmap, UInt8) │
└─────┴──────────────────────────────────────────────┘
```
diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md
index 2efff9c3727..07c741e0588 100644
--- a/docs/ru/sql-reference/functions/hash-functions.md
+++ b/docs/ru/sql-reference/functions/hash-functions.md
@@ -430,7 +430,7 @@ murmurHash3_128( expr )
**Аргументы**
-- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа[String](../../sql-reference/functions/hash-functions.md).
+- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../sql-reference/functions/hash-functions.md).
**Возвращаемое значение**
@@ -439,13 +439,13 @@ murmurHash3_128( expr )
**Пример**
``` sql
-SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type;
+SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type;
```
``` text
-┌─MurmurHash3──────┬─type────────────┐
-│ 6�1�4"S5KT�~~q │ FixedString(16) │
-└──────────────────┴─────────────────┘
+┌─MurmurHash3──────────────────────┬─type───┐
+│ 368A1A311CB7342253354B548E7E7E71 │ String │
+└──────────────────────────────────┴────────┘
```
## xxHash32, xxHash64 {#hash-functions-xxhash32-xxhash64}
diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md
index 66637276726..2d8afe28f1e 100644
--- a/docs/ru/sql-reference/table-functions/postgresql.md
+++ b/docs/ru/sql-reference/table-functions/postgresql.md
@@ -65,10 +65,10 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> SELECT * FROM test;
- int_id | int_nullable | float | str | float_nullable
---------+--------------+-------+------+----------------
- 1 | | 2 | test |
-(1 row)
+ int_id | int_nullable | float | str | float_nullable
+ --------+--------------+-------+------+----------------
+ 1 | | 2 | test |
+ (1 row)
```
Получение данных в ClickHouse:
diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py
index b88df5a03cb..a1e650d3ad3 100644
--- a/docs/tools/single_page.py
+++ b/docs/tools/single_page.py
@@ -109,7 +109,8 @@ def build_single_page_version(lang, args, nav, cfg):
extra['single_page'] = True
extra['is_amp'] = False
- with open(os.path.join(args.docs_dir, lang, 'single.md'), 'w') as single_md:
+ single_md_path = os.path.join(args.docs_dir, lang, 'single.md')
+ with open(single_md_path, 'w') as single_md:
concatenate(lang, args.docs_dir, single_md, nav)
with util.temp_dir() as site_temp:
@@ -221,3 +222,7 @@ def build_single_page_version(lang, args, nav, cfg):
subprocess.check_call(' '.join(create_pdf_command), shell=True)
logging.info(f'Finished building single page version for {lang}')
+
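+ # Remove the temporary single.md created above so it does not linger in the docs source tree.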
+ if os.path.exists(single_md_path):
+ os.unlink(single_md_path)
+
\ No newline at end of file
diff --git a/programs/client/Suggest.cpp b/programs/client/Suggest.cpp
index dfa7048349e..8d4c0fdbd5a 100644
--- a/programs/client/Suggest.cpp
+++ b/programs/client/Suggest.cpp
@@ -108,14 +108,6 @@ void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeo
" UNION ALL "
"SELECT cluster FROM system.clusters"
" UNION ALL "
- "SELECT name FROM system.errors"
- " UNION ALL "
- "SELECT event FROM system.events"
- " UNION ALL "
- "SELECT metric FROM system.asynchronous_metrics"
- " UNION ALL "
- "SELECT metric FROM system.metrics"
- " UNION ALL "
"SELECT macro FROM system.macros"
" UNION ALL "
"SELECT policy_name FROM system.storage_policies"
@@ -139,17 +131,12 @@ void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeo
query << ") WHERE notEmpty(res)";
- Settings settings;
- /// To show all rows from:
- /// - system.errors
- /// - system.events
- settings.system_events_show_zero_values = true;
- fetch(connection, timeouts, query.str(), settings);
+ fetch(connection, timeouts, query.str());
}
-void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query, Settings & settings)
+void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query)
{
- connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, &settings);
+ connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete);
while (true)
{
diff --git a/programs/client/Suggest.h b/programs/client/Suggest.h
index 0049bc08ebf..03332088cbe 100644
--- a/programs/client/Suggest.h
+++ b/programs/client/Suggest.h
@@ -33,7 +33,7 @@ public:
private:
void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit);
- void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query, Settings & settings);
+ void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query);
void fillWordsFromBlock(const Block & block);
/// Words are fetched asynchronously.
diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp
index ef72624e7ab..3cab7a0ce96 100644
--- a/programs/install/Install.cpp
+++ b/programs/install/Install.cpp
@@ -71,6 +71,9 @@ namespace ErrorCodes
}
+/// ANSI escape sequence for intense color in terminal.
+#define HILITE "\033[1m"
+#define END_HILITE "\033[0m"
using namespace DB;
namespace po = boost::program_options;
@@ -563,12 +566,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
if (has_password_for_default_user)
{
- fmt::print("Password for default user is already specified. To remind or reset, see {} and {}.\n",
+ fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE,
users_config_file.string(), users_d.string());
}
else if (!is_interactive)
{
- fmt::print("Password for default user is empty string. See {} and {} to change it.\n",
+ fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE,
users_config_file.string(), users_d.string());
}
else
diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt
index 5ceff47ee0c..0913c6e4a9a 100644
--- a/programs/library-bridge/CMakeLists.txt
+++ b/programs/library-bridge/CMakeLists.txt
@@ -1,6 +1,6 @@
set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES
library-bridge.cpp
- library-log.cpp
+ LibraryInterface.cpp
LibraryBridge.cpp
Handlers.cpp
HandlerFactory.cpp
diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/programs/library-bridge/LibraryInterface.cpp
similarity index 97%
rename from src/Dictionaries/LibraryDictionarySourceExternal.cpp
rename to programs/library-bridge/LibraryInterface.cpp
index 259d0a2846a..3975368c17f 100644
--- a/src/Dictionaries/LibraryDictionarySourceExternal.cpp
+++ b/programs/library-bridge/LibraryInterface.cpp
@@ -1,4 +1,5 @@
-#include "LibraryDictionarySourceExternal.h"
+#include "LibraryInterface.h"
+
#include
namespace
diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.h b/programs/library-bridge/LibraryInterface.h
similarity index 100%
rename from src/Dictionaries/LibraryDictionarySourceExternal.h
rename to programs/library-bridge/LibraryInterface.h
diff --git a/programs/library-bridge/LibraryUtils.h b/programs/library-bridge/LibraryUtils.h
index 359d1de93e3..8ced8df1c48 100644
--- a/programs/library-bridge/LibraryUtils.h
+++ b/programs/library-bridge/LibraryUtils.h
@@ -1,11 +1,12 @@
#pragma once
#include
-#include
#include
#include
#include
+#include "LibraryInterface.h"
+
namespace DB
{
diff --git a/programs/library-bridge/library-log.cpp b/programs/library-bridge/library-log.cpp
deleted file mode 100644
index 89fb31623b3..00000000000
--- a/programs/library-bridge/library-log.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-#include
-#include
-
-namespace
-{
-const char DICT_LOGGER_NAME[] = "LibraryDictionarySourceExternal";
-}
-
-namespace ClickHouseLibrary
-{
-
-std::string_view LIBRARY_CREATE_NEW_FUNC_NAME = "ClickHouseDictionary_v3_libNew";
-std::string_view LIBRARY_CLONE_FUNC_NAME = "ClickHouseDictionary_v3_libClone";
-std::string_view LIBRARY_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_libDelete";
-
-std::string_view LIBRARY_DATA_NEW_FUNC_NAME = "ClickHouseDictionary_v3_dataNew";
-std::string_view LIBRARY_DATA_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_dataDelete";
-
-std::string_view LIBRARY_LOAD_ALL_FUNC_NAME = "ClickHouseDictionary_v3_loadAll";
-std::string_view LIBRARY_LOAD_IDS_FUNC_NAME = "ClickHouseDictionary_v3_loadIds";
-std::string_view LIBRARY_LOAD_KEYS_FUNC_NAME = "ClickHouseDictionary_v3_loadKeys";
-
-std::string_view LIBRARY_IS_MODIFIED_FUNC_NAME = "ClickHouseDictionary_v3_isModified";
-std::string_view LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME = "ClickHouseDictionary_v3_supportsSelectiveLoad";
-
-void log(LogLevel level, CString msg)
-{
- auto & logger = Poco::Logger::get(DICT_LOGGER_NAME);
- switch (level)
- {
- case LogLevel::TRACE:
- if (logger.trace())
- logger.trace(msg);
- break;
- case LogLevel::DEBUG:
- if (logger.debug())
- logger.debug(msg);
- break;
- case LogLevel::INFORMATION:
- if (logger.information())
- logger.information(msg);
- break;
- case LogLevel::NOTICE:
- if (logger.notice())
- logger.notice(msg);
- break;
- case LogLevel::WARNING:
- if (logger.warning())
- logger.warning(msg);
- break;
- case LogLevel::ERROR:
- if (logger.error())
- logger.error(msg);
- break;
- case LogLevel::CRITICAL:
- if (logger.critical())
- logger.critical(msg);
- break;
- case LogLevel::FATAL:
- if (logger.fatal())
- logger.fatal(msg);
- break;
- }
-}
-
-}
diff --git a/programs/odbc-bridge/ODBCConnectionFactory.h b/programs/odbc-bridge/ODBCConnectionFactory.h
index 958cf03cfce..56961ddb2fb 100644
--- a/programs/odbc-bridge/ODBCConnectionFactory.h
+++ b/programs/odbc-bridge/ODBCConnectionFactory.h
@@ -3,7 +3,7 @@
#include
#include
#include
-#include
+#include
#include
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index eeb46ca726d..e3b4316079c 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -173,18 +173,24 @@ int waitServersToFinish(std::vector & servers, size_t
const int sleep_one_ms = 100;
int sleep_current_ms = 0;
int current_connections = 0;
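+ /// Stop accepting new connections and wait until the remaining ones are closed, but not longer than sleep_max_ms in total.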
- while (sleep_current_ms < sleep_max_ms)
+ for (;;)
{
current_connections = 0;
+
for (auto & server : servers)
{
server.stop();
current_connections += server.currentConnections();
}
+
if (!current_connections)
break;
+
sleep_current_ms += sleep_one_ms;
- std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
+ if (sleep_current_ms < sleep_max_ms)
+ std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
+ else
+ break;
}
return current_connections;
}
@@ -879,10 +885,30 @@ int Server::main(const std::vector & /*args*/)
servers_to_start_before_tables->emplace_back(
port_name,
std::make_unique(
- new KeeperTCPHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams));
+ new KeeperTCPHandlerFactory(*this, false), server_pool, socket, new Poco::Net::TCPServerParams));
LOG_INFO(log, "Listening for connections to Keeper (tcp): {}", address.toString());
});
+
+ const char * secure_port_name = "keeper_server.tcp_port_secure";
+ createServer(listen_host, secure_port_name, listen_try, [&](UInt16 port)
+ {
+#if USE_SSL
+ Poco::Net::SecureServerSocket socket;
+ auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
+ socket.setReceiveTimeout(settings.receive_timeout);
+ socket.setSendTimeout(settings.send_timeout);
+ servers_to_start_before_tables->emplace_back(
+ secure_port_name,
+ std::make_unique(
+ new KeeperTCPHandlerFactory(*this, true), server_pool, socket, new Poco::Net::TCPServerParams));
+ LOG_INFO(log, "Listening for connections to Keeper with secure protocol (tcp_secure): {}", address.toString());
+#else
+ UNUSED(port);
+ throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
+ ErrorCodes::SUPPORT_IS_DISABLED};
+#endif
+ });
}
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "ClickHouse server built without NuRaft library. Cannot use internal coordination.");
@@ -931,6 +957,9 @@ int Server::main(const std::vector & /*args*/)
global_context->shutdownKeeperStorageDispatcher();
}
+ /// Wait for the server pool to avoid use-after-free of the destroyed context in the handlers
+ server_pool.joinAll();
+
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
* At this moment, no one could own shared part of Context.
*/
diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp
index 939a48d949f..70d8109545b 100644
--- a/src/Client/Connection.cpp
+++ b/src/Client/Connection.cpp
@@ -551,6 +551,15 @@ void Connection::sendIgnoredPartUUIDs(const std::vector & uuids)
out->next();
}
+
+void Connection::sendReadTaskResponse(const String & response)
+{
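+ /// Packet format: packet type, cluster processing protocol version, response string.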
+ writeVarUInt(Protocol::Client::ReadTaskResponse, *out);
+ writeVarUInt(DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION, *out);
+ writeStringBinary(response, *out);
+ out->next();
+}
+
void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String & name)
{
/// NOTE 'Throttler' is not used in this method (could use, but it's not important right now).
@@ -807,6 +816,9 @@ Packet Connection::receivePacket()
readVectorBinary(res.part_uuids, *in);
return res;
+ case Protocol::Server::ReadTaskRequest:
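+ /// The request has no payload; just return the packet with its type set.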
+ return res;
+
default:
/// In unknown state, disconnect - to not leave unsynchronised connection.
disconnect();
@@ -907,13 +919,13 @@ void Connection::setDescription()
}
-std::unique_ptr Connection::receiveException()
+std::unique_ptr Connection::receiveException() const
{
return std::make_unique(readException(*in, "Received from " + getDescription(), true /* remote */));
}
-std::vector Connection::receiveMultistringMessage(UInt64 msg_type)
+std::vector Connection::receiveMultistringMessage(UInt64 msg_type) const
{
size_t num = Protocol::Server::stringsInMessage(msg_type);
std::vector strings(num);
@@ -923,7 +935,7 @@ std::vector Connection::receiveMultistringMessage(UInt64 msg_type)
}
-Progress Connection::receiveProgress()
+Progress Connection::receiveProgress() const
{
Progress progress;
progress.read(*in, server_revision);
@@ -931,7 +943,7 @@ Progress Connection::receiveProgress()
}
-BlockStreamProfileInfo Connection::receiveProfileInfo()
+BlockStreamProfileInfo Connection::receiveProfileInfo() const
{
BlockStreamProfileInfo profile_info;
profile_info.read(*in);
diff --git a/src/Client/Connection.h b/src/Client/Connection.h
index 65ed956a60b..b4b0d36fb1f 100644
--- a/src/Client/Connection.h
+++ b/src/Client/Connection.h
@@ -159,6 +159,8 @@ public:
/// Send parts' uuids to exclude them from query processing
void sendIgnoredPartUUIDs(const std::vector & uuids);
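+ /// Send the response to a ReadTaskRequest packet received from the server.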
+ void sendReadTaskResponse(const String &);
+
/// Send prepared block of data (serialized and, if need, compressed), that will be read from 'input'.
/// You could pass size of serialized/compressed block.
void sendPreparedData(ReadBuffer & input, size_t size, const String & name = "");
@@ -269,7 +271,7 @@ private:
class LoggerWrapper
{
public:
- LoggerWrapper(Connection & parent_)
+ explicit LoggerWrapper(Connection & parent_)
: log(nullptr), parent(parent_)
{
}
@@ -304,10 +306,10 @@ private:
Block receiveLogData();
Block receiveDataImpl(BlockInputStreamPtr & stream);
- std::vector receiveMultistringMessage(UInt64 msg_type);
- std::unique_ptr receiveException();
- Progress receiveProgress();
- BlockStreamProfileInfo receiveProfileInfo();
+ std::vector receiveMultistringMessage(UInt64 msg_type) const;
+ std::unique_ptr receiveException() const;
+ Progress receiveProgress() const;
+ BlockStreamProfileInfo receiveProfileInfo() const;
void initInputBuffers();
void initBlockInput();
diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h
index 9e1d5f78b03..bf73e9756d2 100644
--- a/src/Client/ConnectionPool.h
+++ b/src/Client/ConnectionPool.h
@@ -26,7 +26,7 @@ public:
using Entry = PoolBase::Entry;
public:
- virtual ~IConnectionPool() {}
+ virtual ~IConnectionPool() = default;
/// Selects the connection to work.
/// If force_connected is false, the client must manually ensure that returned connection is good.
diff --git a/src/Client/HedgedConnections.h b/src/Client/HedgedConnections.h
index f1675108349..9f7d8837536 100644
--- a/src/Client/HedgedConnections.h
+++ b/src/Client/HedgedConnections.h
@@ -14,6 +14,12 @@
namespace DB
{
+namespace ErrorCodes
+{
+ extern const int LOGICAL_ERROR;
+}
+
+
/** To receive data from multiple replicas (connections) from one shard asynchronously.
* The principle of Hedged Connections is used to reduce tail latency:
* if we don't receive data from a replica and there is no progress in query execution
@@ -84,6 +90,11 @@ public:
const ClientInfo & client_info,
bool with_pending_data) override;
+ void sendReadTaskResponse(const String &) override
+ {
+ throw Exception("sendReadTaskResponse in not supported with HedgedConnections", ErrorCodes::LOGICAL_ERROR);
+ }
+
Packet receivePacket() override;
Packet receivePacketUnlocked(AsyncCallback async_callback) override;
diff --git a/src/Client/IConnections.h b/src/Client/IConnections.h
index 38730922456..d251a5fb3ab 100644
--- a/src/Client/IConnections.h
+++ b/src/Client/IConnections.h
@@ -24,6 +24,8 @@ public:
const ClientInfo & client_info,
bool with_pending_data) = 0;
+ virtual void sendReadTaskResponse(const String &) = 0;
+
/// Get packet from any replica.
virtual Packet receivePacket() = 0;
diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp
index 8b2b7c49f26..2992e991df7 100644
--- a/src/Client/MultiplexedConnections.cpp
+++ b/src/Client/MultiplexedConnections.cpp
@@ -155,6 +155,15 @@ void MultiplexedConnections::sendIgnoredPartUUIDs(const std::vector & uuid
}
}
+
+void MultiplexedConnections::sendReadTaskResponse(const String & response)
+{
+ std::lock_guard lock(cancel_mutex);
+ if (cancelled)
+ return;
+ current_connection->sendReadTaskResponse(response);
+}
+
Packet MultiplexedConnections::receivePacket()
{
std::lock_guard lock(cancel_mutex);
@@ -210,6 +219,7 @@ Packet MultiplexedConnections::drain()
switch (packet.type)
{
+ case Protocol::Server::ReadTaskRequest:
case Protocol::Server::PartUUIDs:
case Protocol::Server::Data:
case Protocol::Server::Progress:
@@ -273,6 +283,7 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac
switch (packet.type)
{
+ case Protocol::Server::ReadTaskRequest:
case Protocol::Server::PartUUIDs:
case Protocol::Server::Data:
case Protocol::Server::Progress:
diff --git a/src/Client/MultiplexedConnections.h b/src/Client/MultiplexedConnections.h
index c04b06e525e..f642db1c4cd 100644
--- a/src/Client/MultiplexedConnections.h
+++ b/src/Client/MultiplexedConnections.h
@@ -39,6 +39,8 @@ public:
const ClientInfo & client_info,
bool with_pending_data) override;
+ void sendReadTaskResponse(const String &) override;
+
Packet receivePacket() override;
void disconnect() override;
diff --git a/src/Common/ConcurrentBoundedQueue.h b/src/Common/ConcurrentBoundedQueue.h
index 7bc7f362095..cb29efc3349 100644
--- a/src/Common/ConcurrentBoundedQueue.h
+++ b/src/Common/ConcurrentBoundedQueue.h
@@ -6,7 +6,7 @@
#include
#include
-#include
+#include
/** A very simple thread-safe queue of limited size.
* If you try to pop an item from an empty queue, the thread is blocked until the queue becomes nonempty.
diff --git a/src/Common/PoolBase.h b/src/Common/PoolBase.h
index 43f4fbff9fe..6fc5aee26dd 100644
--- a/src/Common/PoolBase.h
+++ b/src/Common/PoolBase.h
@@ -51,7 +51,7 @@ private:
*/
struct PoolEntryHelper
{
- PoolEntryHelper(PooledObject & data_) : data(data_) { data.in_use = true; }
+ explicit PoolEntryHelper(PooledObject & data_) : data(data_) { data.in_use = true; }
~PoolEntryHelper()
{
std::unique_lock lock(data.pool.mutex);
@@ -69,7 +69,7 @@ public:
public:
friend class PoolBase