Merge branch 'master' into sundy-li-storage-rocksdb

Alexey Milovidov 2020-11-08 17:51:44 +03:00
commit e75230dee3
255 changed files with 1846 additions and 1072 deletions


@ -14,6 +14,11 @@ unset (_current_dir_name)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
if (SANITIZE STREQUAL "undefined")
# 3rd-party libraries usually not intended to work with UBSan.
add_compile_options(-fno-sanitize=undefined)
endif()
set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
add_subdirectory (boost-cmake)
@ -157,9 +162,6 @@ if(USE_INTERNAL_SNAPPY_LIBRARY)
add_subdirectory(snappy)
set (SNAPPY_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/snappy")
if(SANITIZE STREQUAL "undefined")
target_compile_options(${SNAPPY_LIBRARY} PRIVATE -fno-sanitize=undefined)
endif()
endif()
if (USE_INTERNAL_PARQUET_LIBRARY)

contrib/poco vendored

@ -1 +1 @@
Subproject commit 757d947235b307675cff964f29b19d388140a9eb
Subproject commit f49c6ab8d3aa71828bd1b411485c21722e8c9d82


@ -240,6 +240,10 @@ TESTS_TO_SKIP=(
01354_order_by_tuple_collate_const
01355_ilike
01411_bayesian_ab_testing
01532_collate_in_low_cardinality
01533_collate_in_nullable
01542_collate_in_array
01543_collate_in_tuple
_orc_
arrow
avro


@ -36,6 +36,7 @@ toc_title: Adopters
| <a href="https://www.criteo.com/" class="favicon">Criteo</a> | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) |
| <a href="https://www.chinatelecomglobal.com/" class="favicon">Dataliance for China Telecom</a> | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
| <a href="https://db.com" class="favicon">Deutsche Bank</a> | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
| <a href="https://deeplay.io/eng/" class="favicon">Deeplay</a> | Gaming Analytics | — | — | — | [Job advertisement, 2020](https://career.habr.com/vacancies/1000062568) |
| <a href="https://www.diva-e.com" class="favicon">Diva-e</a> | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
| <a href="https://www.ecwid.com/" class="favicon">Ecwid</a> | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) |
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) |
@ -45,6 +46,7 @@ toc_title: Adopters
| <a href="https://fun.co/rp" class="favicon">FunCorp</a> | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
| <a href="https://www.the-ica.com/" class="favicon">ICA</a> | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) |
| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| <a href="https://www.innogames.com" class="favicon">InnoGames</a> | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
@ -68,6 +70,7 @@ toc_title: Adopters
| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitoring and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
| <a href="https://www.percona.com/" class="favicon">Percona</a> | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) |
| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
| <a href="https://posthog.com/" class="favicon">PostHog</a> | Product Analytics | Main Product | — | — | [Release Notes, Oct 2020](https://posthog.com/blog/the-posthog-array-1-15-0) |
| <a href="https://postmates.com/" class="favicon">Postmates</a> | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |


@ -1765,6 +1765,23 @@ Default value: `0`.
- [Distributed Table Engine](../../engines/table-engines/special/distributed.md#distributed)
- [Managing Distributed Tables](../../sql-reference/statements/system.md#query-language-system-distributed)
## use_compact_format_in_distributed_parts_names {#use_compact_format_in_distributed_parts_names}
Uses a compact format for storing blocks of asynchronous INSERTs (i.e. when `insert_distributed_sync` is disabled) into tables with the `Distributed` engine; an illustration of the resulting directory names follows the note below.
Possible values:
- 0 — Uses `user[:password]@host:port#default_database` directory format.
- 1 — Uses `[shard{shard_index}[_replica{replica_index}]]` directory format.
Default value: `1`.
!!! note "Note"
- With `use_compact_format_in_distributed_parts_names=0`, changes to the cluster definition are not applied to pending async INSERTs.
- With `use_compact_format_in_distributed_parts_names=1`, changing the order of the nodes in the cluster definition changes the `shard_index`/`replica_index`, so be aware.
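For illustration only (hypothetical host and credentials): with `0`, a pending block for the first shard might sit in a directory named along the lines of `default@remote-host:9000#default`, while with `1` the same shard would use a name such as `shard1_replica1`.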
## background_buffer_flush_schedule_pool_size {#background_buffer_flush_schedule_pool_size}
Sets the number of threads performing background flush in [Buffer](../../engines/table-engines/special/buffer.md)-engine tables. This setting is applied at ClickHouse server start and can't be changed in a user session.


@ -4,6 +4,6 @@ toc_priority: 140
# sumWithOverflow {#sumwithoverflowx}
Computes the sum of the numbers, using the same data type for the result as for the input parameters. If the sum exceeds the maximum value for this data type, the function returns an error.
Computes the sum of the numbers, using the same data type for the result as for the input parameters. If the sum exceeds the maximum value for this data type, it is calculated with overflow.
Only works for numbers.
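To make the revised wording concrete, here is a minimal C++ sketch of what "calculated with overflow" means for an unsigned type; `uint8_t` stands in for the aggregated column's type, and the code is illustrative rather than taken from the ClickHouse sources:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    // The true sum 200 + 100 = 300 does not fit into the 8-bit input type,
    // so the result wraps modulo 256 instead of being reported as an error.
    std::uint8_t a = 200;
    std::uint8_t b = 100;
    std::uint8_t sum_with_overflow = static_cast<std::uint8_t>(a + b);
    std::cout << static_cast<int>(sum_with_overflow) << '\n'; // prints 44
    return 0;
}
```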


@ -64,6 +64,6 @@ CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10
Buffer tables are used when too many INSERTs arrive from a large number of servers per unit of time and the data cannot be buffered before insertion, so the INSERTs cannot keep up.
Note that even for Buffer tables it does not make sense to insert data one row at a time: this only achieves a rate of a few thousand rows per second, while inserting in larger blocks can reach more than a million rows per second (see the "Performance" section).
Note that even for Buffer tables it does not make sense to insert data one row at a time: this only achieves a rate of a few thousand rows per second, while inserting in larger blocks can reach more than a million rows per second (see the ["Performance"](../../../introduction/performance/) section).
[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/buffer/) <!--hide-->


@ -227,9 +227,6 @@ else ()
install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-git-import DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
list(APPEND CLICKHOUSE_BUNDLE clickhouse-git-import)
endif ()
if(ENABLE_CLICKHOUSE_ODBC_BRIDGE)
list(APPEND CLICKHOUSE_BUNDLE clickhouse-odbc-bridge)
endif()
install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)


@ -75,6 +75,7 @@
#include <Common/InterruptListener.h>
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Formats/registerFormats.h>
#include <Common/Config/configReadClient.h>
#include <Storages/ColumnsDescription.h>
#include <common/argsToConfig.h>
@ -463,6 +464,7 @@ private:
{
UseSSL use_ssl;
registerFormats();
registerFunctions();
registerAggregateFunctions();
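The same registration call is threaded through every bundled tool in the hunks below (copier, local server, obfuscator, odbc-bridge, server). A minimal sketch of the pattern, using the registration functions from the headers above; the surrounding main() is illustrative, not the actual clickhouse-client code:

```cpp
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Formats/registerFormats.h>

int main()
{
    // Register pluggable components exactly once, before any query is parsed or executed.
    // After this change, input/output formats need an explicit registerFormats() call too.
    DB::registerFunctions();
    DB::registerAggregateFunctions();
    DB::registerFormats();
    // ... run the tool ...
    return 0;
}
```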


@ -1,6 +1,7 @@
#include "ClusterCopierApp.h"
#include <Common/StatusFile.h>
#include <Common/TerminalSize.h>
#include <Formats/registerFormats.h>
#include <unistd.h>
@ -122,6 +123,7 @@ void ClusterCopierApp::mainImpl()
registerStorages();
registerDictionaries();
registerDisks();
registerFormats();
static const std::string default_database = "_local";
DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, *context));


@ -33,6 +33,7 @@
#include <Storages/registerStorages.h>
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options.hpp>
#include <common/argsToConfig.h>
@ -224,6 +225,7 @@ try
registerStorages();
registerDictionaries();
registerDisks();
registerFormats();
/// Maybe useless
if (config().has("macros"))


@ -23,6 +23,7 @@
#include <Common/HashTable/HashMap.h>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Formats/registerFormats.h>
#include <Core/Block.h>
#include <common/StringRef.h>
#include <common/DateLUT.h>
@ -1050,6 +1051,8 @@ try
using namespace DB;
namespace po = boost::program_options;
registerFormats();
po::options_description description = createOptionsDescription("Options", getTerminalWidth());
description.add_options()
("help", "produce help message")


@ -10,19 +10,8 @@ set (CLICKHOUSE_ODBC_BRIDGE_SOURCES
PingHandler.cpp
SchemaAllowedHandler.cpp
validateODBCConnectionString.cpp
odbc-bridge.cpp
)
set (CLICKHOUSE_ODBC_BRIDGE_LINK
PRIVATE
clickhouse_parsers
clickhouse_aggregate_functions
daemon
dbms
Poco::Data
PUBLIC
Poco::Data::ODBC
)
clickhouse_program_add_library(odbc-bridge)
if (OS_LINUX)
# clickhouse-odbc-bridge is always a separate binary.
@ -30,10 +19,17 @@ if (OS_LINUX)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
endif ()
add_executable(clickhouse-odbc-bridge odbc-bridge.cpp)
add_executable(clickhouse-odbc-bridge ${CLICKHOUSE_ODBC_BRIDGE_SOURCES})
set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
clickhouse_program_link_split_binary(odbc-bridge)
target_link_libraries(clickhouse-odbc-bridge PRIVATE
daemon
dbms
clickhouse_parsers
Poco::Data
Poco::Data::ODBC
)
set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
if (USE_GDB_ADD_INDEX)
add_custom_command(TARGET clickhouse-odbc-bridge POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} ../clickhouse-odbc-bridge COMMENT "Adding .gdb-index to clickhouse-odbc-bridge" VERBATIM)


@ -18,11 +18,13 @@
#include <Common/Exception.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/config.h>
#include <Formats/registerFormats.h>
#include <common/logger_useful.h>
#include <ext/scope_guard.h>
#include <ext/range.h>
#include <Common/SensitiveDataMasker.h>
namespace DB
{
namespace ErrorCodes
@ -160,6 +162,8 @@ int ODBCBridge::main(const std::vector<std::string> & /*args*/)
if (is_help)
return Application::EXIT_OK;
registerFormats();
LOG_INFO(log, "Starting up");
Poco::Net::ServerSocket socket;
auto address = socketBindListen(socket, hostname, port, log);


@ -1,3 +1,2 @@
add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp)
add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp ../validateODBCConnectionString.cpp)
clickhouse_target_link_split_lib(validate-odbc-connection-string odbc-bridge)
target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io)


@ -51,6 +51,7 @@
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/registerFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Formats/registerFormats.h>
#include <Storages/registerStorages.h>
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
@ -266,6 +267,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
registerStorages();
registerDictionaries();
registerDisks();
registerFormats();
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision());
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());


@ -0,0 +1 @@
../../../tests/config/config.d/test_cluster_with_incorrect_pw.xml


@ -198,6 +198,7 @@ namespace
/// Serialize the list of ATTACH queries to a string.
std::stringstream ss;
ss.exceptions(std::ios::failbit);
for (const ASTPtr & query : queries)
ss << *query << ";\n";
String file_contents = std::move(ss).str();
@ -353,6 +354,7 @@ String DiskAccessStorage::getStorageParamsJSON() const
if (readonly)
json.set("readonly", readonly.load());
std::ostringstream oss;
oss.exceptions(std::ios::failbit);
Poco::JSON::Stringifier::stringify(json, oss);
return oss.str();
}
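The `exceptions(std::ios::failbit)` calls added throughout this commit all follow the same idea; a small self-contained sketch (not taken from the ClickHouse sources):

```cpp
#include <sstream>
#include <iostream>

int main()
{
    std::ostringstream oss;
    // Once failbit is added to the exception mask, any stream operation that
    // sets failbit throws std::ios_base::failure instead of failing silently.
    oss.exceptions(std::ios::failbit);
    oss << 42;
    std::cout << oss.str() << '\n'; // prints 42
    return 0;
}
```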


@ -151,6 +151,7 @@ String LDAPAccessStorage::getStorageParamsJSON() const
params_json.set("roles", default_role_names);
std::ostringstream oss;
oss.exceptions(std::ios::failbit);
Poco::JSON::Stringifier::stringify(params_json, oss);
return oss.str();


@ -461,6 +461,7 @@ String UsersConfigAccessStorage::getStorageParamsJSON() const
if (!path.empty())
json.set("path", path);
std::ostringstream oss;
oss.exceptions(std::ios::failbit);
Poco::JSON::Stringifier::stringify(json, oss);
return oss.str();
}


@ -245,6 +245,7 @@ public:
{
DB::writeIntBinary<size_t>(this->data(place).total_values, buf);
std::ostringstream rng_stream;
rng_stream.exceptions(std::ios::failbit);
rng_stream << this->data(place).rng;
DB::writeStringBinary(rng_stream.str(), buf);
}
@ -275,6 +276,7 @@ public:
std::string rng_string;
DB::readStringBinary(rng_string, buf);
std::istringstream rng_stream(rng_string);
rng_stream.exceptions(std::ios::failbit);
rng_stream >> this->data(place).rng;
}
@ -564,6 +566,7 @@ public:
{
DB::writeIntBinary<size_t>(data(place).total_values, buf);
std::ostringstream rng_stream;
rng_stream.exceptions(std::ios::failbit);
rng_stream << data(place).rng;
DB::writeStringBinary(rng_stream.str(), buf);
}
@ -598,6 +601,7 @@ public:
std::string rng_string;
DB::readStringBinary(rng_string, buf);
std::istringstream rng_stream(rng_string);
rng_stream.exceptions(std::ios::failbit);
rng_stream >> data(place).rng;
}


@ -191,6 +191,7 @@ public:
std::string rng_string;
DB::readStringBinary(rng_string, buf);
std::istringstream rng_stream(rng_string);
rng_stream.exceptions(std::ios::failbit);
rng_stream >> rng;
for (size_t i = 0; i < samples.size(); ++i)
@ -205,6 +206,7 @@ public:
DB::writeIntBinary<size_t>(total_values, buf);
std::ostringstream rng_stream;
rng_stream.exceptions(std::ios::failbit);
rng_stream << rng;
DB::writeStringBinary(rng_stream.str(), buf);
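For context on the `rng_stream << rng` / `rng_stream >> rng` pairs above: standard random engines stream their whole state as text, so a string round-trip restores the exact sequence. A hedged sketch with `std::mt19937` standing in for whatever generator the sampler actually uses:

```cpp
#include <cassert>
#include <random>
#include <sstream>

int main()
{
    std::mt19937 rng(42);

    std::ostringstream out;
    out.exceptions(std::ios::failbit);
    out << rng;                  // serialize the full engine state as text

    std::istringstream in(out.str());
    in.exceptions(std::ios::failbit);
    std::mt19937 restored;
    in >> restored;              // restore the state

    assert(rng() == restored()); // both produce the same next value
    return 0;
}
```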


@ -223,6 +223,7 @@ std::string MultiplexedConnections::dumpAddressesUnlocked() const
{
bool is_first = true;
std::ostringstream os;
os.exceptions(std::ios::failbit);
for (const ReplicaState & state : replica_states)
{
const Connection * connection = state.connection;


@ -324,8 +324,7 @@ void ColumnArray::popBack(size_t n)
offsets_data.resize_assume_reserved(offsets_data.size() - n);
}
int ColumnArray::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint, const Collator * collator) const
int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
{
const ColumnArray & rhs = assert_cast<const ColumnArray &>(rhs_);
@ -334,8 +333,15 @@ int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_dir
size_t rhs_size = rhs.sizeAt(m);
size_t min_size = std::min(lhs_size, rhs_size);
for (size_t i = 0; i < min_size; ++i)
if (int res = getData().compareAt(offsetAt(n) + i, rhs.offsetAt(m) + i, *rhs.data.get(), nan_direction_hint))
{
int res;
if (collator)
res = getData().compareAtWithCollation(offsetAt(n) + i, rhs.offsetAt(m) + i, *rhs.data.get(), nan_direction_hint, *collator);
else
res = getData().compareAt(offsetAt(n) + i, rhs.offsetAt(m) + i, *rhs.data.get(), nan_direction_hint);
if (res)
return res;
}
return lhs_size < rhs_size
? -1
@ -344,6 +350,16 @@ int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_dir
: 1);
}
int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
{
return compareAtImpl(n, m, rhs_, nan_direction_hint);
}
int ColumnArray::compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint, const Collator & collator) const
{
return compareAtImpl(n, m, rhs_, nan_direction_hint, &collator);
}
void ColumnArray::compareColumn(const IColumn & rhs, size_t rhs_row_num,
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
int direction, int nan_direction_hint) const
@ -352,27 +368,26 @@ void ColumnArray::compareColumn(const IColumn & rhs, size_t rhs_row_num,
compare_results, direction, nan_direction_hint);
}
namespace
template <bool positive>
struct ColumnArray::Cmp
{
template <bool positive>
struct Less
{
const ColumnArray & parent;
int nan_direction_hint;
const Collator * collator;
Less(const ColumnArray & parent_, int nan_direction_hint_)
Cmp(const ColumnArray & parent_, int nan_direction_hint_, const Collator * collator_=nullptr)
: parent(parent_), nan_direction_hint(nan_direction_hint_) {}
: parent(parent_), nan_direction_hint(nan_direction_hint_), collator(collator_) {}
bool operator()(size_t lhs, size_t rhs) const
int operator()(size_t lhs, size_t rhs) const
{
if (positive)
int res;
return parent.compareAt(lhs, rhs, parent, nan_direction_hint) < 0;
if (collator)
res = parent.compareAtWithCollation(lhs, rhs, parent, nan_direction_hint, *collator);
else
return parent.compareAt(lhs, rhs, parent, nan_direction_hint) > 0;
res = parent.compareAt(lhs, rhs, parent, nan_direction_hint);
return positive ? res : -res;
}
};
}
void ColumnArray::reserve(size_t n)
{
@ -753,7 +768,8 @@ ColumnPtr ColumnArray::indexImpl(const PaddedPODArray<T> & indexes, size_t limit
INSTANTIATE_INDEX_IMPL(ColumnArray)
void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
template <typename Comparator>
void ColumnArray::getPermutationImpl(size_t limit, Permutation & res, Comparator cmp) const
{
size_t s = size();
if (limit >= s)
@ -763,23 +779,16 @@ void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_h
for (size_t i = 0; i < s; ++i)
res[i] = i;
auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };
if (limit)
{
std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
if (reverse)
std::partial_sort(res.begin(), res.begin() + limit, res.end(), Less<false>(*this, nan_direction_hint));
else else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), Less<true>(*this, nan_direction_hint));
std::sort(res.begin(), res.end(), less);
}
else
{
if (reverse)
std::sort(res.begin(), res.end(), Less<false>(*this, nan_direction_hint));
else
std::sort(res.begin(), res.end(), Less<true>(*this, nan_direction_hint));
}
}
void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const
template <typename Comparator>
void ColumnArray::updatePermutationImpl(size_t limit, Permutation & res, EqualRanges & equal_range, Comparator cmp) const
{
if (equal_range.empty())
return;
@ -792,20 +801,19 @@ void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_directio
if (limit)
--number_of_ranges;
auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };
EqualRanges new_ranges;
for (size_t i = 0; i < number_of_ranges; ++i)
{
const auto & [first, last] = equal_range[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, less);
std::sort(res.begin() + first, res.begin() + last, Less<false>(*this, nan_direction_hint));
else
std::sort(res.begin() + first, res.begin() + last, Less<true>(*this, nan_direction_hint));
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
if (cmp(res[new_first], res[j]) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
@ -827,14 +835,11 @@ void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_directio
/// Since then we are working inside the interval.
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, Less<false>(*this, nan_direction_hint));
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, Less<true>(*this, nan_direction_hint));
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
if (cmp(res[new_first], res[j]) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
@ -845,7 +850,7 @@ void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_directio
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) == 0)
if (cmp(res[new_first], res[j]) == 0)
{
std::swap(res[new_last], res[j]);
++new_last;
@ -859,6 +864,39 @@ void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_directio
equal_range = std::move(new_ranges);
}
void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
{
if (reverse)
getPermutationImpl(limit, res, Cmp<false>(*this, nan_direction_hint));
else
getPermutationImpl(limit, res, Cmp<true>(*this, nan_direction_hint));
}
void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const
{
if (reverse)
updatePermutationImpl(limit, res, equal_range, Cmp<false>(*this, nan_direction_hint));
else
updatePermutationImpl(limit, res, equal_range, Cmp<true>(*this, nan_direction_hint));
}
void ColumnArray::getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
{
if (reverse)
getPermutationImpl(limit, res, Cmp<false>(*this, nan_direction_hint, &collator));
else
getPermutationImpl(limit, res, Cmp<true>(*this, nan_direction_hint, &collator));
}
void ColumnArray::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const
{
if (reverse)
updatePermutationImpl(limit, res, equal_range, Cmp<false>(*this, nan_direction_hint, &collator));
else
updatePermutationImpl(limit, res, equal_range, Cmp<true>(*this, nan_direction_hint, &collator));
}
ColumnPtr ColumnArray::replicate(const Offsets & replicate_offsets) const
{
if (replicate_offsets.empty())
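To summarize the refactoring in this file: the old `Less<positive>` functor and the per-branch reverse/limit sorts are replaced by a three-way `Cmp<positive>` comparator plus generic `getPermutationImpl`/`updatePermutationImpl` helpers that wrap it in a `less` lambda. A condensed, self-contained sketch of that shape, with plain `int` data standing in for the real column types:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Three-way comparator; the <positive> flag flips the sign for descending order.
template <bool positive>
struct Cmp
{
    const std::vector<int> & data;
    int operator()(std::size_t lhs, std::size_t rhs) const
    {
        int res = (data[lhs] > data[rhs]) - (data[lhs] < data[rhs]);
        return positive ? res : -res;
    }
};

// One implementation serves both sort directions and the limited/unlimited cases.
template <typename Comparator>
void getPermutationImpl(std::size_t size, std::size_t limit, std::vector<std::size_t> & res, Comparator cmp)
{
    res.resize(size);
    for (std::size_t i = 0; i < size; ++i)
        res[i] = i;
    auto less = [&cmp](std::size_t lhs, std::size_t rhs) { return cmp(lhs, rhs) < 0; };
    if (limit && limit < size)
        std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
    else
        std::sort(res.begin(), res.end(), less);
}
```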


@ -77,8 +77,11 @@ public:
void compareColumn(const IColumn & rhs, size_t rhs_row_num,
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
int direction, int nan_direction_hint) const override;
int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint, const Collator & collator) const override;
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const override;
void getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges& equal_range) const override;
void reserve(size_t n) override;
size_t byteSize() const override;
size_t allocatedBytes() const override;
@ -132,6 +135,8 @@ public:
return false;
}
bool isCollationSupported() const override { return getData().isCollationSupported(); }
private:
WrappedPtr data;
WrappedPtr offsets;
@ -169,6 +174,17 @@ private:
ColumnPtr filterTuple(const Filter & filt, ssize_t result_size_hint) const;
ColumnPtr filterNullable(const Filter & filt, ssize_t result_size_hint) const;
ColumnPtr filterGeneric(const Filter & filt, ssize_t result_size_hint) const;
int compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint, const Collator * collator=nullptr) const;
template <typename Comparator>
void getPermutationImpl(size_t limit, Permutation & res, Comparator cmp) const;
template <typename Comparator>
void updatePermutationImpl(size_t limit, Permutation & res, EqualRanges & equal_range, Comparator cmp) const;
template <bool positive>
struct Cmp;
};


@ -248,6 +248,8 @@ public:
/// The constant value. It is valid even if the size of the column is 0.
template <typename T>
T getValue() const { return getField().safeGet<NearestFieldType<T>>(); }
bool isCollationSupported() const override { return data->isCollationSupported(); }
};
}


@ -1,5 +1,6 @@
#include <Columns/ColumnLowCardinality.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnString.h>
#include <DataStreams/ColumnGathererStream.h>
#include <DataTypes/NumberTraits.h>
#include <Common/HashTable/HashMap.h>
@ -278,14 +279,26 @@ MutableColumnPtr ColumnLowCardinality::cloneResized(size_t size) const
return ColumnLowCardinality::create(IColumn::mutate(std::move(unique_ptr)), getIndexes().cloneResized(size));
}
int ColumnLowCardinality::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
int ColumnLowCardinality::compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator * collator) const
{
const auto & low_cardinality_column = assert_cast<const ColumnLowCardinality &>(rhs);
size_t n_index = getIndexes().getUInt(n);
size_t m_index = low_cardinality_column.getIndexes().getUInt(m);
if (collator)
return getDictionary().getNestedColumn()->compareAtWithCollation(n_index, m_index, *low_cardinality_column.getDictionary().getNestedColumn(), nan_direction_hint, *collator);
return getDictionary().compareAt(n_index, m_index, low_cardinality_column.getDictionary(), nan_direction_hint);
}
int ColumnLowCardinality::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
{
return compareAtImpl(n, m, rhs, nan_direction_hint);
}
int ColumnLowCardinality::compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator & collator) const
{
return compareAtImpl(n, m, rhs, nan_direction_hint, &collator);
}
void ColumnLowCardinality::compareColumn(const IColumn & rhs, size_t rhs_row_num,
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
int direction, int nan_direction_hint) const
@ -295,13 +308,16 @@ void ColumnLowCardinality::compareColumn(const IColumn & rhs, size_t rhs_row_num
compare_results, direction, nan_direction_hint);
}
void ColumnLowCardinality::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
void ColumnLowCardinality::getPermutationImpl(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, const Collator * collator) const
{
if (limit == 0)
limit = size();
size_t unique_limit = getDictionary().size();
Permutation unique_perm;
if (collator)
getDictionary().getNestedColumn()->getPermutationWithCollation(*collator, reverse, unique_limit, nan_direction_hint, unique_perm);
else
getDictionary().getNestedColumn()->getPermutation(reverse, unique_limit, nan_direction_hint, unique_perm);
/// TODO: optimize with sse.
@ -330,7 +346,8 @@ void ColumnLowCardinality::getPermutation(bool reverse, size_t limit, int nan_di
}
}
void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const
template <typename Cmp>
void ColumnLowCardinality::updatePermutationImpl(size_t limit, Permutation & res, EqualRanges & equal_ranges, Cmp comparator) const
{
if (equal_ranges.empty())
return;
@ -345,20 +362,17 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan
EqualRanges new_ranges;
SCOPE_EXIT({equal_ranges = std::move(new_ranges);});
auto less = [&comparator](size_t lhs, size_t rhs){ return comparator(lhs, rhs) < 0; };
for (size_t i = 0; i < number_of_ranges; ++i)
{
const auto& [first, last] = equal_ranges[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, less);
std::sort(res.begin() + first, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) > 0; });
else
std::sort(res.begin() + first, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) < 0; });
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
if (comparator(res[new_first], res[j]) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
@ -379,17 +393,12 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan
/// Since then we are working inside the interval.
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) > 0; });
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) < 0; });
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (getDictionary().compareAt(getIndexes().getUInt(res[new_first]), getIndexes().getUInt(res[j]), getDictionary(), nan_direction_hint) != 0)
if (comparator(res[new_first],res[j]) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
@ -401,7 +410,7 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (getDictionary().compareAt(getIndexes().getUInt(res[new_first]), getIndexes().getUInt(res[j]), getDictionary(), nan_direction_hint) == 0)
if (comparator(res[new_first], res[j]) == 0)
{
std::swap(res[new_last], res[j]);
++new_last;
@ -412,6 +421,38 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan
}
}
void ColumnLowCardinality::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
{
getPermutationImpl(reverse, limit, nan_direction_hint, res);
}
void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const
{
auto comparator = [this, nan_direction_hint, reverse](size_t lhs, size_t rhs)
{
int ret = getDictionary().compareAt(getIndexes().getUInt(lhs), getIndexes().getUInt(rhs), getDictionary(), nan_direction_hint);
return reverse ? -ret : ret;
};
updatePermutationImpl(limit, res, equal_ranges, comparator);
}
void ColumnLowCardinality::getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
{
getPermutationImpl(reverse, limit, nan_direction_hint, res, &collator);
}
void ColumnLowCardinality::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_ranges) const
{
auto comparator = [this, &collator, reverse, nan_direction_hint](size_t lhs, size_t rhs)
{
int ret = getDictionary().getNestedColumn()->compareAtWithCollation(getIndexes().getUInt(lhs), getIndexes().getUInt(rhs), *getDictionary().getNestedColumn(), nan_direction_hint, collator);
return reverse ? -ret : ret;
};
updatePermutationImpl(limit, res, equal_ranges, comparator);
}
std::vector<MutableColumnPtr> ColumnLowCardinality::scatter(ColumnIndex num_columns, const Selector & selector) const
{
auto columns = getIndexes().scatter(num_columns, selector);
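The Nullable and LowCardinality columns use the same delegation trick as ColumnArray above: a single `compareAtImpl` takes an optional `Collator *`, and the public `compareAt`/`compareAtWithCollation` overloads just forward to it. A toy sketch of that structure; the `Collator` here is a placeholder type, not the real ICU-backed class:

```cpp
#include <cstddef>
#include <string>
#include <vector>

struct Collator
{
    // Placeholder: a real collator would apply locale-aware ordering.
    int compare(const std::string & a, const std::string & b) const { return a.compare(b); }
};

class DemoColumn
{
public:
    int compareAt(std::size_t n, std::size_t m, const DemoColumn & rhs) const
    {
        return compareAtImpl(n, m, rhs, nullptr);
    }

    int compareAtWithCollation(std::size_t n, std::size_t m, const DemoColumn & rhs, const Collator & collator) const
    {
        return compareAtImpl(n, m, rhs, &collator);
    }

private:
    int compareAtImpl(std::size_t n, std::size_t m, const DemoColumn & rhs, const Collator * collator) const
    {
        // With a collator, compare through it; otherwise fall back to plain ordering.
        if (collator)
            return collator->compare(values[n], rhs.values[m]);
        return values[n].compare(rhs.values[m]);
    }

    std::vector<std::string> values;
};
```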


@ -125,10 +125,16 @@ public:
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
int direction, int nan_direction_hint) const override;
int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator &) const override;
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_range) const override;
void getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges& equal_range) const override;
ColumnPtr replicate(const Offsets & offsets) const override
{
return ColumnLowCardinality::create(dictionary.getColumnUniquePtr(), getIndexes().replicate(offsets));
@ -170,6 +176,7 @@ public:
size_t sizeOfValueIfFixed() const override { return getDictionary().sizeOfValueIfFixed(); }
bool isNumeric() const override { return getDictionary().isNumeric(); }
bool lowCardinality() const override { return true; }
bool isCollationSupported() const override { return getDictionary().getNestedColumn()->isCollationSupported(); }
/**
* Checks if the dictionary column is Nullable(T).
@ -309,6 +316,13 @@ private:
void compactInplace();
void compactIfSharedDictionary();
int compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator * collator=nullptr) const;
void getPermutationImpl(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, const Collator * collator = nullptr) const;
template <typename Cmp>
void updatePermutationImpl(size_t limit, Permutation & res, EqualRanges & equal_ranges, Cmp comparator) const;
};


@ -6,6 +6,7 @@
#include <Common/WeakHash.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnString.h>
#include <DataStreams/ColumnGathererStream.h>
@ -223,7 +224,7 @@ ColumnPtr ColumnNullable::index(const IColumn & indexes, size_t limit) const
return ColumnNullable::create(indexed_data, indexed_null_map);
}
int ColumnNullable::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
int ColumnNullable::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint, const Collator * collator) const
{
/// NULL values share the properties of NaN values.
/// Here the last parameter of compareAt is called null_direction_hint
@ -245,9 +246,22 @@ int ColumnNullable::compareAt(size_t n, size_t m, const IColumn & rhs_, int null
}
const IColumn & nested_rhs = nullable_rhs.getNestedColumn();
if (collator)
return getNestedColumn().compareAtWithCollation(n, m, nested_rhs, null_direction_hint, *collator);
return getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint);
}
int ColumnNullable::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
{
return compareAtImpl(n, m, rhs_, null_direction_hint);
}
int ColumnNullable::compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint, const Collator & collator) const
{
return compareAtImpl(n, m, rhs_, null_direction_hint, &collator);
}
void ColumnNullable::compareColumn(const IColumn & rhs, size_t rhs_row_num,
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
int direction, int nan_direction_hint) const
@ -256,9 +270,13 @@ void ColumnNullable::compareColumn(const IColumn & rhs, size_t rhs_row_num,
compare_results, direction, nan_direction_hint);
}
void ColumnNullable::getPermutation(bool reverse, size_t limit, int null_direction_hint, Permutation & res) const
void ColumnNullable::getPermutationImpl(bool reverse, size_t limit, int null_direction_hint, Permutation & res, const Collator * collator) const
{
/// Cannot pass limit because of unknown amount of NULLs.
if (collator)
getNestedColumn().getPermutationWithCollation(*collator, reverse, 0, null_direction_hint, res);
else
getNestedColumn().getPermutation(reverse, 0, null_direction_hint, res);
if ((null_direction_hint > 0) != reverse)
@ -329,7 +347,7 @@ void ColumnNullable::getPermutation(bool reverse, size_t limit, int null_directi
}
}
void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const
void ColumnNullable::updatePermutationImpl(bool reverse, size_t limit, int null_direction_hint, Permutation & res, EqualRanges & equal_ranges, const Collator * collator) const
{
if (equal_ranges.empty())
return;
@ -432,12 +450,35 @@ void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_dire
}
}
if (collator)
getNestedColumn().updatePermutationWithCollation(*collator, reverse, limit, null_direction_hint, res, new_ranges);
else
getNestedColumn().updatePermutation(reverse, limit, null_direction_hint, res, new_ranges);
equal_ranges = std::move(new_ranges);
std::move(null_ranges.begin(), null_ranges.end(), std::back_inserter(equal_ranges));
}
void ColumnNullable::getPermutation(bool reverse, size_t limit, int null_direction_hint, Permutation & res) const
{
getPermutationImpl(reverse, limit, null_direction_hint, res);
}
void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const
{
updatePermutationImpl(reverse, limit, null_direction_hint, res, equal_ranges);
}
void ColumnNullable::getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int null_direction_hint, Permutation & res) const
{
getPermutationImpl(reverse, limit, null_direction_hint, res, &collator);
}
void ColumnNullable::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int null_direction_hint, Permutation & res, EqualRanges & equal_range) const
{
updatePermutationImpl(reverse, limit, null_direction_hint, res, equal_range, &collator);
}
void ColumnNullable::gather(ColumnGathererStream & gatherer) void ColumnNullable::gather(ColumnGathererStream & gatherer)
{ {
gatherer.gather(*this); gatherer.gather(*this);
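For readers skimming the hunk above: after delegating to the nested column, the permutation is post-processed so that NULL rows end up at one end. A self-contained sketch of that ordering contract, with std::optional standing in for Nullable (illustrative names, not ClickHouse API):

#include <algorithm>
#include <optional>
#include <string>
#include <vector>

std::vector<size_t> sortedPermutation(const std::vector<std::optional<std::string>> & data, bool nulls_last)
{
    std::vector<size_t> perm(data.size());
    for (size_t i = 0; i < perm.size(); ++i)
        perm[i] = i;

    /// Order by the "nested" value; std::optional puts nullopt first by default.
    std::sort(perm.begin(), perm.end(), [&](size_t lhs, size_t rhs) { return data[lhs] < data[rhs]; });

    /// Then move the NULL rows to the requested end, keeping the rest stable.
    if (nulls_last)
        std::stable_partition(perm.begin(), perm.end(), [&](size_t i) { return data[i].has_value(); });
    return perm;
}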

View File

@@ -6,6 +6,7 @@
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>

+class Collator;

namespace DB
{
@@ -92,8 +93,12 @@ public:
    void compareColumn(const IColumn & rhs, size_t rhs_row_num,
                       PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
                       int direction, int nan_direction_hint) const override;
+    int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int null_direction_hint, const Collator &) const override;
    void getPermutation(bool reverse, size_t limit, int null_direction_hint, Permutation & res) const override;
-    void updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const override;
+    void updatePermutation(bool reverse, size_t limit, int null_direction_hint, Permutation & res, EqualRanges & equal_range) const override;
+    void getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int null_direction_hint, Permutation & res) const override;
+    void updatePermutationWithCollation(
+        const Collator & collator, bool reverse, size_t limit, int null_direction_hint, Permutation & res, EqualRanges & equal_range) const override;
    void reserve(size_t n) override;
    size_t byteSize() const override;
    size_t allocatedBytes() const override;
@@ -129,6 +134,7 @@ public:
    bool valuesHaveFixedSize() const override { return nested_column->valuesHaveFixedSize(); }
    size_t sizeOfValueIfFixed() const override { return null_map->sizeOfValueIfFixed() + nested_column->sizeOfValueIfFixed(); }
    bool onlyNull() const override { return nested_column->isDummy(); }
+    bool isCollationSupported() const override { return nested_column->isCollationSupported(); }

    /// Return the column that represents values.
@@ -164,6 +170,13 @@ private:
    template <bool negative>
    void applyNullMapImpl(const ColumnUInt8 & map);

+    int compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint, const Collator * collator = nullptr) const;
+
+    void getPermutationImpl(bool reverse, size_t limit, int null_direction_hint, Permutation & res, const Collator * collator = nullptr) const;
+
+    void updatePermutationImpl(
+        bool reverse, size_t limit, int null_direction_hint, Permutation & res, EqualRanges & equal_ranges, const Collator * collator = nullptr) const;
};

ColumnPtr makeNullable(const ColumnPtr & column);
View File

@@ -285,21 +285,22 @@ void ColumnString::compareColumn(
}

template <bool positive>
-struct ColumnString::less
+struct ColumnString::Cmp
{
    const ColumnString & parent;
-    explicit less(const ColumnString & parent_) : parent(parent_) {}
+    explicit Cmp(const ColumnString & parent_) : parent(parent_) {}
-    bool operator()(size_t lhs, size_t rhs) const
+    int operator()(size_t lhs, size_t rhs) const
    {
        int res = memcmpSmallAllowOverflow15(
            parent.chars.data() + parent.offsetAt(lhs), parent.sizeAt(lhs) - 1,
            parent.chars.data() + parent.offsetAt(rhs), parent.sizeAt(rhs) - 1);

-        return positive ? (res < 0) : (res > 0);
+        return positive ? res : -res;
    }
};

-void ColumnString::getPermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res) const
+template <typename Comparator>
+void ColumnString::getPermutationImpl(size_t limit, Permutation & res, Comparator cmp) const
{
    size_t s = offsets.size();
    res.resize(s);
@@ -309,23 +310,16 @@ void ColumnString::getPermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res) const
    if (limit >= s)
        limit = 0;

+    auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };
    if (limit)
-    {
-        if (reverse)
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), less<false>(*this));
-        else
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), less<true>(*this));
-    }
+        std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
    else
-    {
-        if (reverse)
-            std::sort(res.begin(), res.end(), less<false>(*this));
-        else
-            std::sort(res.begin(), res.end(), less<true>(*this));
-    }
+        std::sort(res.begin(), res.end(), less);
}

-void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_ranges) const
+template <typename Comparator>
+void ColumnString::updatePermutationImpl(size_t limit, Permutation & res, EqualRanges & equal_ranges, Comparator cmp) const
{
    if (equal_ranges.empty())
        return;
@@ -340,21 +334,17 @@ void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_ranges) const
    if (limit)
        --number_of_ranges;

+    auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };
    for (size_t i = 0; i < number_of_ranges; ++i)
    {
        const auto & [first, last] = equal_ranges[i];
-        if (reverse)
-            std::sort(res.begin() + first, res.begin() + last, less<false>(*this));
-        else
-            std::sort(res.begin() + first, res.begin() + last, less<true>(*this));
+        std::sort(res.begin() + first, res.begin() + last, less);

        size_t new_first = first;
        for (size_t j = first + 1; j < last; ++j)
        {
-            if (memcmpSmallAllowOverflow15(
-                chars.data() + offsetAt(res[j]), sizeAt(res[j]) - 1,
-                chars.data() + offsetAt(res[new_first]), sizeAt(res[new_first]) - 1) != 0)
+            if (cmp(res[j], res[new_first]) != 0)
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);
@@ -375,17 +365,12 @@ void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_ranges) const
        /// Since then we are working inside the interval.

-        if (reverse)
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
-        else
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));
+        std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);

        size_t new_first = first;
        for (size_t j = first + 1; j < limit; ++j)
        {
-            if (memcmpSmallAllowOverflow15(
-                chars.data() + offsetAt(res[j]), sizeAt(res[j]) - 1,
-                chars.data() + offsetAt(res[new_first]), sizeAt(res[new_first]) - 1) != 0)
+            if (cmp(res[j], res[new_first]) != 0)
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);
@@ -395,9 +380,7 @@ void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_ranges) const
        size_t new_last = limit;
        for (size_t j = limit; j < last; ++j)
        {
-            if (memcmpSmallAllowOverflow15(
-                chars.data() + offsetAt(res[j]), sizeAt(res[j]) - 1,
-                chars.data() + offsetAt(res[new_first]), sizeAt(res[new_first]) - 1) == 0)
+            if (cmp(res[j], res[new_first]) == 0)
            {
                std::swap(res[j], res[new_last]);
                ++new_last;
@@ -408,6 +391,56 @@ void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_ranges) const
    }
}

+void ColumnString::getPermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res) const
+{
+    if (reverse)
+        getPermutationImpl(limit, res, Cmp<false>(*this));
+    else
+        getPermutationImpl(limit, res, Cmp<true>(*this));
+}
+
+void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_ranges) const
+{
+    if (reverse)
+        updatePermutationImpl(limit, res, equal_ranges, Cmp<false>(*this));
+    else
+        updatePermutationImpl(limit, res, equal_ranges, Cmp<true>(*this));
+}
+
+template <bool positive>
+struct ColumnString::CmpWithCollation
+{
+    const ColumnString & parent;
+    const Collator & collator;
+
+    CmpWithCollation(const ColumnString & parent_, const Collator & collator_) : parent(parent_), collator(collator_) {}
+
+    int operator()(size_t lhs, size_t rhs) const
+    {
+        int res = collator.compare(
+            reinterpret_cast<const char *>(&parent.chars[parent.offsetAt(lhs)]), parent.sizeAt(lhs),
+            reinterpret_cast<const char *>(&parent.chars[parent.offsetAt(rhs)]), parent.sizeAt(rhs));
+
+        return positive ? res : -res;
+    }
+};
+
+void ColumnString::getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation & res) const
+{
+    if (reverse)
+        getPermutationImpl(limit, res, CmpWithCollation<false>(*this, collator));
+    else
+        getPermutationImpl(limit, res, CmpWithCollation<true>(*this, collator));
+}
+
+void ColumnString::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_ranges) const
+{
+    if (reverse)
+        updatePermutationImpl(limit, res, equal_ranges, CmpWithCollation<false>(*this, collator));
+    else
+        updatePermutationImpl(limit, res, equal_ranges, CmpWithCollation<true>(*this, collator));
+}
+
ColumnPtr ColumnString::replicate(const Offsets & replicate_offsets) const
{
    size_t col_size = size();
@@ -476,13 +509,13 @@ void ColumnString::getExtremes(Field & min, Field & max) const
    size_t min_idx = 0;
    size_t max_idx = 0;

-    less<true> less_op(*this);
+    Cmp<true> cmp_op(*this);

    for (size_t i = 1; i < col_size; ++i)
    {
-        if (less_op(i, min_idx))
+        if (cmp_op(i, min_idx) < 0)
            min_idx = i;
-        else if (less_op(max_idx, i))
+        else if (cmp_op(max_idx, i) < 0)
            max_idx = i;
    }
@@ -491,7 +524,7 @@ void ColumnString::getExtremes(Field & min, Field & max) const
}

-int ColumnString::compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, const Collator & collator) const
+int ColumnString::compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int, const Collator & collator) const
{
    const ColumnString & rhs = assert_cast<const ColumnString &>(rhs_);
@@ -500,134 +533,6 @@ int ColumnString::compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int, const Collator & collator) const
        reinterpret_cast<const char *>(&rhs.chars[rhs.offsetAt(m)]), rhs.sizeAt(m));
}
template <bool positive>
struct ColumnString::lessWithCollation
{
const ColumnString & parent;
const Collator & collator;
lessWithCollation(const ColumnString & parent_, const Collator & collator_) : parent(parent_), collator(collator_) {}
bool operator()(size_t lhs, size_t rhs) const
{
int res = collator.compare(
reinterpret_cast<const char *>(&parent.chars[parent.offsetAt(lhs)]), parent.sizeAt(lhs),
reinterpret_cast<const char *>(&parent.chars[parent.offsetAt(rhs)]), parent.sizeAt(rhs));
return positive ? (res < 0) : (res > 0);
}
};
void ColumnString::getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, Permutation & res) const
{
size_t s = offsets.size();
res.resize(s);
for (size_t i = 0; i < s; ++i)
res[i] = i;
if (limit >= s)
limit = 0;
if (limit)
{
if (reverse)
std::partial_sort(res.begin(), res.begin() + limit, res.end(), lessWithCollation<false>(*this, collator));
else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), lessWithCollation<true>(*this, collator));
}
else
{
if (reverse)
std::sort(res.begin(), res.end(), lessWithCollation<false>(*this, collator));
else
std::sort(res.begin(), res.end(), lessWithCollation<true>(*this, collator));
}
}
void ColumnString::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_ranges) const
{
if (equal_ranges.empty())
return;
if (limit >= size() || limit >= equal_ranges.back().second)
limit = 0;
size_t number_of_ranges = equal_ranges.size();
if (limit)
--number_of_ranges;
EqualRanges new_ranges;
SCOPE_EXIT({equal_ranges = std::move(new_ranges);});
for (size_t i = 0; i < number_of_ranges; ++i)
{
const auto& [first, last] = equal_ranges[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, lessWithCollation<false>(*this, collator));
else
std::sort(res.begin() + first, res.begin() + last, lessWithCollation<true>(*this, collator));
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (collator.compare(
reinterpret_cast<const char *>(&chars[offsetAt(res[new_first])]), sizeAt(res[new_first]),
reinterpret_cast<const char *>(&chars[offsetAt(res[j])]), sizeAt(res[j])) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
if (last - new_first > 1)
new_ranges.emplace_back(new_first, last);
}
if (limit)
{
const auto & [first, last] = equal_ranges.back();
if (limit < first || limit > last)
return;
/// Since then we are working inside the interval.
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, lessWithCollation<false>(*this, collator));
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, lessWithCollation<true>(*this, collator));
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (collator.compare(
reinterpret_cast<const char *>(&chars[offsetAt(res[new_first])]), sizeAt(res[new_first]),
reinterpret_cast<const char *>(&chars[offsetAt(res[j])]), sizeAt(res[j])) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (collator.compare(
reinterpret_cast<const char *>(&chars[offsetAt(res[new_first])]), sizeAt(res[new_first]),
reinterpret_cast<const char *>(&chars[offsetAt(res[j])]), sizeAt(res[j])) == 0)
{
std::swap(res[new_last], res[j]);
++new_last;
}
}
if (new_last - new_first > 1)
new_ranges.emplace_back(new_first, new_last);
}
}
void ColumnString::protect()
{
    getChars().protect();
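The refactoring above replaces the boolean less/lessWithCollation functors with three-way Cmp/CmpWithCollation comparators and builds the ordering predicate with a lambda, so a single getPermutationImpl serves ascending, descending and collated sorts. A compact standalone illustration of the pattern (illustrative types, not the ClickHouse classes):

#include <algorithm>
#include <string>
#include <vector>

/// Three-way comparator: negating the result flips the sort direction,
/// just as Cmp<positive> and CmpWithCollation<positive> do above.
template <bool positive>
struct ThreeWayCmp
{
    const std::vector<std::string> & data;

    int operator()(size_t lhs, size_t rhs) const
    {
        int res = data[lhs].compare(data[rhs]);
        return positive ? res : -res;
    }
};

template <typename Comparator>
void sortIndices(std::vector<size_t> & indices, size_t limit, Comparator cmp)
{
    /// One implementation, any comparator: wrap the three-way result into a less-than predicate.
    auto less = [&cmp](size_t lhs, size_t rhs) { return cmp(lhs, rhs) < 0; };
    if (limit && limit < indices.size())
        std::partial_sort(indices.begin(), indices.begin() + limit, indices.end(), less);
    else
        std::sort(indices.begin(), indices.end(), less);
}

Calling sortIndices(idx, 0, ThreeWayCmp<false>{data}) gives a descending permutation without a second code path, which is the point of the change.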

View File

@@ -43,14 +43,20 @@ private:
    size_t ALWAYS_INLINE sizeAt(ssize_t i) const { return offsets[i] - offsets[i - 1]; }

    template <bool positive>
-    struct less;
+    struct Cmp;
    template <bool positive>
-    struct lessWithCollation;
+    struct CmpWithCollation;

    ColumnString() = default;
    ColumnString(const ColumnString & src);

+    template <typename Comparator>
+    void getPermutationImpl(size_t limit, Permutation & res, Comparator cmp) const;
+
+    template <typename Comparator>
+    void updatePermutationImpl(size_t limit, Permutation & res, EqualRanges & equal_ranges, Comparator cmp) const;
+
public:
    const char * getFamilyName() const override { return "String"; }
    TypeIndex getDataType() const override { return TypeIndex::String; }
@@ -229,16 +235,16 @@ public:
                       int direction, int nan_direction_hint) const override;

    /// Variant of compareAt for string comparison with respect of collation.
-    int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, const Collator & collator) const;
+    int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int, const Collator & collator) const override;

    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
-    void updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const override;
+    void updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_ranges) const override;

    /// Sorting with respect of collation.
-    void getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, Permutation & res) const;
+    void getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation & res) const override;
-    void updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation & res, EqualRanges& equal_range) const;
+    void updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_ranges) const override;

    ColumnPtr replicate(const Offsets & replicate_offsets) const override;
@@ -270,6 +276,8 @@ public:
    // Throws an exception if offsets/chars are messed up
    void validate() const;
+
+    bool isCollationSupported() const override { return true; }
};
View File

@@ -275,16 +275,27 @@ MutableColumns ColumnTuple::scatter(ColumnIndex num_columns, const Selector & selector) const
    return res;
}

-int ColumnTuple::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+int ColumnTuple::compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator * collator) const
{
    const size_t tuple_size = columns.size();
    for (size_t i = 0; i < tuple_size; ++i)
-        if (int res = columns[i]->compareAt(n, m, *assert_cast<const ColumnTuple &>(rhs).columns[i], nan_direction_hint))
+    {
+        int res;
+        if (collator && columns[i]->isCollationSupported())
+            res = columns[i]->compareAtWithCollation(n, m, *assert_cast<const ColumnTuple &>(rhs).columns[i], nan_direction_hint, *collator);
+        else
+            res = columns[i]->compareAt(n, m, *assert_cast<const ColumnTuple &>(rhs).columns[i], nan_direction_hint);
+        if (res)
            return res;
+    }
    return 0;
}

+int ColumnTuple::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+{
+    return compareAtImpl(n, m, rhs, nan_direction_hint);
+}
+
void ColumnTuple::compareColumn(const IColumn & rhs, size_t rhs_row_num,
                                PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
                                int direction, int nan_direction_hint) const
@@ -293,14 +304,20 @@ void ColumnTuple::compareColumn(const IColumn & rhs, size_t rhs_row_num,
                                  compare_results, direction, nan_direction_hint);
}

+int ColumnTuple::compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator & collator) const
+{
+    return compareAtImpl(n, m, rhs, nan_direction_hint, &collator);
+}
+
template <bool positive>
struct ColumnTuple::Less
{
    TupleColumns columns;
    int nan_direction_hint;
+    const Collator * collator;

-    Less(const TupleColumns & columns_, int nan_direction_hint_)
-        : columns(columns_), nan_direction_hint(nan_direction_hint_)
+    Less(const TupleColumns & columns_, int nan_direction_hint_, const Collator * collator_ = nullptr)
+        : columns(columns_), nan_direction_hint(nan_direction_hint_), collator(collator_)
    {
    }
@@ -308,7 +325,11 @@ struct ColumnTuple::Less
    {
        for (const auto & column : columns)
        {
-            int res = column->compareAt(a, b, *column, nan_direction_hint);
+            int res;
+            if (collator && column->isCollationSupported())
+                res = column->compareAtWithCollation(a, b, *column, nan_direction_hint, *collator);
+            else
+                res = column->compareAt(a, b, *column, nan_direction_hint);
            if (res < 0)
                return positive;
            else if (res > 0)
@@ -318,7 +339,8 @@ struct ColumnTuple::Less
    }
};

-void ColumnTuple::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
+template <typename LessOperator>
+void ColumnTuple::getPermutationImpl(size_t limit, Permutation & res, LessOperator less) const
{
    size_t rows = size();
    res.resize(rows);
@@ -330,27 +352,24 @@ void ColumnTuple::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
    if (limit)
    {
-        if (reverse)
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), Less<false>(columns, nan_direction_hint));
-        else
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), Less<true>(columns, nan_direction_hint));
+        std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
    }
    else
    {
-        if (reverse)
-            std::sort(res.begin(), res.end(), Less<false>(columns, nan_direction_hint));
-        else
-            std::sort(res.begin(), res.end(), Less<true>(columns, nan_direction_hint));
+        std::sort(res.begin(), res.end(), less);
    }
}

-void ColumnTuple::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const
+void ColumnTuple::updatePermutationImpl(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges, const Collator * collator) const
{
    if (equal_ranges.empty())
        return;

    for (const auto & column : columns)
    {
-        column->updatePermutation(reverse, limit, nan_direction_hint, res, equal_ranges);
+        if (collator && column->isCollationSupported())
+            column->updatePermutationWithCollation(*collator, reverse, limit, nan_direction_hint, res, equal_ranges);
+        else
+            column->updatePermutation(reverse, limit, nan_direction_hint, res, equal_ranges);

        while (limit && !equal_ranges.empty() && limit <= equal_ranges.back().first)
@@ -361,6 +380,32 @@ void ColumnTuple::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const
    }
}

+void ColumnTuple::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
+{
+    if (reverse)
+        getPermutationImpl(limit, res, Less<false>(columns, nan_direction_hint));
+    else
+        getPermutationImpl(limit, res, Less<true>(columns, nan_direction_hint));
+}
+
+void ColumnTuple::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const
+{
+    updatePermutationImpl(reverse, limit, nan_direction_hint, res, equal_ranges);
+}
+
+void ColumnTuple::getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
+{
+    if (reverse)
+        getPermutationImpl(limit, res, Less<false>(columns, nan_direction_hint, &collator));
+    else
+        getPermutationImpl(limit, res, Less<true>(columns, nan_direction_hint, &collator));
+}
+
+void ColumnTuple::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_ranges) const
+{
+    updatePermutationImpl(reverse, limit, nan_direction_hint, res, equal_ranges, &collator);
+}
+
void ColumnTuple::gather(ColumnGathererStream & gatherer)
{
    gatherer.gather(*this);
@@ -433,5 +478,15 @@ bool ColumnTuple::structureEquals(const IColumn & rhs) const
    return false;
}

+bool ColumnTuple::isCollationSupported() const
+{
+    for (const auto & column : columns)
+    {
+        if (column->isCollationSupported())
+            return true;
+    }
+    return false;
+}
+
}
View File

@@ -75,15 +75,19 @@ public:
    void compareColumn(const IColumn & rhs, size_t rhs_row_num,
                       PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
                       int direction, int nan_direction_hint) const override;
+    int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator & collator) const override;
    void getExtremes(Field & min, Field & max) const override;
    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
-    void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const override;
+    void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override;
+    void getPermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
+    void updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_ranges) const override;
    void reserve(size_t n) override;
    size_t byteSize() const override;
    size_t allocatedBytes() const override;
    void protect() override;
    void forEachSubcolumn(ColumnCallback callback) override;
    bool structureEquals(const IColumn & rhs) const override;
+    bool isCollationSupported() const override;

    size_t tupleSize() const { return columns.size(); }
@@ -94,6 +98,15 @@ public:
    Columns getColumnsCopy() const { return {columns.begin(), columns.end()}; }

    const ColumnPtr & getColumnPtr(size_t idx) const { return columns[idx]; }
+
+private:
+    int compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator * collator = nullptr) const;
+
+    template <typename LessOperator>
+    void getPermutationImpl(size_t limit, Permutation & res, LessOperator less) const;
+
+    void updatePermutationImpl(
+        bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges, const Collator * collator = nullptr) const;
};
View File

@@ -9,7 +9,7 @@
class SipHash;
+class Collator;

namespace DB
{
@@ -18,6 +18,7 @@ namespace ErrorCodes
{
    extern const int CANNOT_GET_SIZE_OF_FIELD;
    extern const int NOT_IMPLEMENTED;
+    extern const int BAD_COLLATION;
}

class Arena;
@@ -250,6 +251,12 @@ public:
      */
    virtual int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0;

+    /// Equivalent to compareAt, but collator is used to compare values.
+    virtual int compareAtWithCollation(size_t, size_t, const IColumn &, int, const Collator &) const
+    {
+        throw Exception("Collations could be specified only for String, LowCardinality(String), Nullable(String) or for Array or Tuple, containing it.", ErrorCodes::BAD_COLLATION);
+    }
+
    /// Compare the whole column with single value from rhs column.
    /// If row_indexes is nullptr, it's ignored. Otherwise, it is a set of rows to compare.
    /// compare_results[i] will be equal to compareAt(row_indexes[i], rhs_row_num, rhs, nan_direction_hint) * direction
@@ -277,6 +284,18 @@ public:
      */
    virtual void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_ranges) const = 0;

+    /** Equivalent to getPermutation and updatePermutation but collator is used to compare values.
+      * Supported for String, LowCardinality(String), Nullable(String) and for Array and Tuple, containing them.
+      */
+    virtual void getPermutationWithCollation(const Collator &, bool, size_t, int, Permutation &) const
+    {
+        throw Exception("Collations could be specified only for String, LowCardinality(String), Nullable(String) or for Array or Tuple, containing them.", ErrorCodes::BAD_COLLATION);
+    }
+
+    virtual void updatePermutationWithCollation(const Collator &, bool, size_t, int, Permutation &, EqualRanges &) const
+    {
+        throw Exception("Collations could be specified only for String, LowCardinality(String), Nullable(String) or for Array or Tuple, containing them.", ErrorCodes::BAD_COLLATION);
+    }
+
    /** Copies each element according offsets parameter.
      * (i-th element should be copied offsets[i] - offsets[i - 1] times.)
      * It is necessary in ARRAY JOIN operation.
@@ -402,6 +421,8 @@ public:
    virtual bool lowCardinality() const { return false; }

+    virtual bool isCollationSupported() const { return false; }
+
    virtual ~IColumn() = default;
    IColumn() = default;
    IColumn(const IColumn &) = default;
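A sketch of how callers are expected to use the new virtuals: check isCollationSupported() and fall back to the plain comparison otherwise, which is what the sort cursor further below does. The helper itself is illustrative and the include path assumes the ClickHouse source tree (IColumn.h now forward-declares Collator):

#include <Columns/IColumn.h>

/// Compare row `lhs` with row `rhs` of the same column, honouring the collation only where supported.
static int compareRows(const DB::IColumn & column, size_t lhs, size_t rhs, const Collator & collator)
{
    if (column.isCollationSupported())
        return column.compareAtWithCollation(lhs, rhs, column, /* nan_direction_hint = */ 1, collator);
    return column.compareAt(lhs, rhs, column, /* nan_direction_hint = */ 1);
}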

View File

@@ -71,7 +71,8 @@ void checkColumn(
        std::unordered_map<UInt32, T> map;
        size_t num_collisions = 0;

-        std::stringstream collitions_str;
+        std::stringstream collisions_str;
+        collisions_str.exceptions(std::ios::failbit);

        for (size_t i = 0; i < eq_class.size(); ++i)
        {
@@ -86,14 +87,14 @@ void checkColumn(
                if (num_collisions <= max_collisions_to_print)
                {
-                    collitions_str << "Collision:\n";
-                    collitions_str << print_for_row(it->second) << '\n';
-                    collitions_str << print_for_row(i) << std::endl;
+                    collisions_str << "Collision:\n";
+                    collisions_str << print_for_row(it->second) << '\n';
+                    collisions_str << print_for_row(i) << std::endl;
                }

                if (num_collisions > allowed_collisions)
                {
-                    std::cerr << collitions_str.rdbuf();
+                    std::cerr << collisions_str.rdbuf();
                    break;
                }
            }
        }
View File

@@ -538,6 +538,7 @@ XMLDocumentPtr ConfigProcessor::processConfig(
        *has_zk_includes = !contributing_zk_paths.empty();

    std::stringstream comment;
+    comment.exceptions(std::ios::failbit);
    comment << " This file was generated automatically.\n";
    comment << " Do not edit it: it is likely to be discarded and generated again before it's read next time.\n";
    comment << " Files used to generate this file:";

View File

@@ -246,6 +246,7 @@ static std::string getExtraExceptionInfo(const std::exception & e)
std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded_stacktrace /*= false*/, bool with_extra_info /*= true*/)
{
    std::stringstream stream;
+    stream.exceptions(std::ios::failbit);

    try
    {
@@ -365,6 +366,7 @@ void tryLogException(std::exception_ptr e, Poco::Logger * logger, const std::string & start_of_message)
std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace)
{
    std::stringstream stream;
+    stream.exceptions(std::ios::failbit);

    try
    {
View File

@@ -134,6 +134,7 @@ void MemoryTracker::alloc(Int64 size)
        ProfileEvents::increment(ProfileEvents::QueryMemoryLimitExceeded);

        std::stringstream message;
+        message.exceptions(std::ios::failbit);
        message << "Memory tracker";
        if (const auto * description = description_ptr.load(std::memory_order_relaxed))
            message << " " << description;
@@ -166,6 +167,7 @@ void MemoryTracker::alloc(Int64 size)
        ProfileEvents::increment(ProfileEvents::QueryMemoryLimitExceeded);

        std::stringstream message;
+        message.exceptions(std::ios::failbit);
        message << "Memory limit";
        if (const auto * description = description_ptr.load(std::memory_order_relaxed))
            message << " " << description;
View File

@@ -74,6 +74,7 @@ ShellCommand::~ShellCommand()
void ShellCommand::logCommand(const char * filename, char * const argv[])
{
    std::stringstream args;
+    args.exceptions(std::ios::failbit);
    for (int i = 0; argv != nullptr && argv[i] != nullptr; ++i)
    {
        if (i > 0)
View File

@@ -1,76 +1,44 @@
#pragma once

#include <Common/Arena.h>
-#include <ext/range.h>
-#include <ext/size.h>
-#include <ext/bit_cast.h>
-#include <cstdlib>
-#include <memory>
+#include <common/unaligned.h>

namespace DB
{

/** Can allocate memory objects of fixed size with deletion support.
-  * For small `object_size`s allocated no less than getMinAllocationSize() bytes. */
+  * For small `object_size`s allocated no less than pointer size.
+  */
class SmallObjectPool
{
private:
-    struct Block { Block * next; };
-    static constexpr auto getMinAllocationSize() { return sizeof(Block); }
    const size_t object_size;
    Arena pool;
-    Block * free_list{};
+    char * free_list = nullptr;

public:
-    SmallObjectPool(
-        const size_t object_size_, const size_t initial_size = 4096, const size_t growth_factor = 2,
-        const size_t linear_growth_threshold = 128 * 1024 * 1024)
-        : object_size{std::max(object_size_, getMinAllocationSize())},
-          pool{initial_size, growth_factor, linear_growth_threshold}
+    SmallObjectPool(size_t object_size_)
+        : object_size{std::max(object_size_, sizeof(char *))}
    {
-        if (pool.size() < object_size)
-            return;
-        const auto num_objects = pool.size() / object_size;
-        auto head = free_list = ext::bit_cast<Block *>(pool.alloc(num_objects * object_size));
-        for (const auto i : ext::range(0, num_objects - 1))
-        {
-            (void) i;
-            head->next = ext::bit_cast<Block *>(ext::bit_cast<char *>(head) + object_size);
-            head = head->next;
-        }
-        head->next = nullptr;
    }

    char * alloc()
    {
        if (free_list)
        {
-            const auto res = reinterpret_cast<char *>(free_list);
-            free_list = free_list->next;
+            char * res = free_list;
+            free_list = unalignedLoad<char *>(free_list);
            return res;
        }

        return pool.alloc(object_size);
    }

-    void free(const void * ptr)
+    void free(char * ptr)
    {
-        union
-        {
-            const void * p_v;
-            Block * block;
-        };
-        p_v = ptr;
-        block->next = free_list;
-        free_list = block;
+        unalignedStore<char *>(ptr, free_list);
+        free_list = ptr;
    }

    /// The size of the allocated pool in bytes
@@ -81,5 +49,4 @@ public:
};

}
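A minimal usage sketch of the simplified pool: freed memory is threaded into an intrusive free list (the first sizeof(char *) bytes store the next pointer) and handed back on the next alloc(). The include path and the placement-new usage are assumptions for illustration:

#include <Common/SmallObjectPool.h>
#include <new>

struct Node
{
    int value;
    Node * next;
};

int main()
{
    DB::SmallObjectPool pool(sizeof(Node));

    char * mem = pool.alloc();                  /// carved out of the Arena
    Node * node = new (mem) Node{42, nullptr};  /// placement-construct the object

    node->~Node();
    pool.free(mem);                             /// goes onto the free list

    char * reused = pool.alloc();               /// typically the same address as `mem`
    return mem == reused ? 0 : 1;
}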

View File

@@ -24,6 +24,7 @@
std::string signalToErrorMessage(int sig, const siginfo_t & info, const ucontext_t & context)
{
    std::stringstream error;
+    error.exceptions(std::ios::failbit);

    switch (sig)
    {
        case SIGSEGV:
@@ -319,6 +320,7 @@ static void toStringEveryLineImpl(
    std::unordered_map<std::string, DB::Dwarf> dwarfs;

    std::stringstream out;
+    out.exceptions(std::ios::failbit);

    for (size_t i = offset; i < size; ++i)
    {
@@ -358,6 +360,7 @@ static void toStringEveryLineImpl(
    }
#else
    std::stringstream out;
+    out.exceptions(std::ios::failbit);

    for (size_t i = offset; i < size; ++i)
    {
@@ -373,6 +376,7 @@ static void toStringEveryLineImpl(
static std::string toStringImpl(const StackTrace::FramePointers & frame_pointers, size_t offset, size_t size)
{
    std::stringstream out;
+    out.exceptions(std::ios::failbit);
    toStringEveryLineImpl(frame_pointers, offset, size, [&](const std::string & str) { out << str << '\n'; });
    return out.str();
}
View File

@@ -154,6 +154,8 @@ std::pair<bool, std::string> StudentTTest::compareAndReport(size_t confidence_level_index) const
    double mean_confidence_interval = table_value * t_statistic;

    std::stringstream ss;
+    ss.exceptions(std::ios::failbit);
+
    if (mean_difference > mean_confidence_interval && (mean_difference - mean_confidence_interval > 0.0001)) /// difference must be more than 0.0001, to take into account connection latency.
    {
        ss << "Difference at " << confidence_level[confidence_level_index] << "% confidence : ";
View File

@@ -216,7 +216,7 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_it)
            if (!jobs.empty())
            {
-                job = jobs.top().job;
+                job = std::move(jobs.top().job);
                jobs.pop();
            }
            else
View File

@@ -398,8 +398,7 @@ bool PerfEventsCounters::processThreadLocalChanges(const std::string & needed_events_list)
    return true;
}

-// Parse comma-separated list of event names. Empty means all available
-// events.
+// Parse comma-separated list of event names. Empty means all available events.
std::vector<size_t> PerfEventsCounters::eventIndicesFromString(const std::string & events_list)
{
    std::vector<size_t> result;
@@ -418,8 +417,7 @@ std::vector<size_t> PerfEventsCounters::eventIndicesFromString(const std::string & events_list)
    std::string event_name;
    while (std::getline(iss, event_name, ','))
    {
-        // Allow spaces at the beginning of the token, so that you can write
-        // 'a, b'.
+        // Allow spaces at the beginning of the token, so that you can write 'a, b'.
        event_name.erase(0, event_name.find_first_not_of(' '));

        auto entry = event_name_to_index.find(event_name);
View File

@@ -80,6 +80,7 @@ void ThreadStatus::assertState(const std::initializer_list<int> & permitted_states, const char * description) const
    }

    std::stringstream ss;
+    ss.exceptions(std::ios::failbit);
    ss << "Unexpected thread state " << getCurrentState();
    if (description)
        ss << ": " << description;
View File

@@ -49,6 +49,7 @@ struct UInt128
    String toHexString() const
    {
        std::ostringstream os;
+        os.exceptions(std::ios::failbit);
        os << std::setw(16) << std::setfill('0') << std::hex << high << low;
        return String(os.str());
    }
View File

@@ -308,6 +308,7 @@ struct ODBCBridgeMixin
        path.setFileName("clickhouse-odbc-bridge");

        std::stringstream command;
+        command.exceptions(std::ios::failbit);

#if !CLICKHOUSE_SPLIT_BINARY
        cmd_args.push_back("odbc-bridge");
View File

@@ -219,6 +219,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Container & container, int64_t zxid) const
            ++it->second.seq_num;

        std::stringstream seq_num_str;
+        seq_num_str.exceptions(std::ios::failbit);
        seq_num_str << std::setw(10) << std::setfill('0') << seq_num;

        path_created += seq_num_str.str();
View File

@@ -81,6 +81,7 @@ __attribute__((__weak__)) void checkStackSize()
    if (stack_size * 2 > max_stack_size)
    {
        std::stringstream message;
+        message.exceptions(std::ios::failbit);
        message << "Stack size too large"
            << ". Stack address: " << stack_address
            << ", frame address: " << frame_address
View File

@@ -3,7 +3,6 @@
#include <re2/stringpiece.h>
#include <algorithm>
#include <sstream>
-#include <cassert>
#include <iomanip>
@@ -20,6 +19,7 @@ namespace DB
std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_globs)
{
    std::ostringstream oss_for_escaping;
+    oss_for_escaping.exceptions(std::ios::failbit);
    /// Escaping only characters that not used in glob syntax
    for (const auto & letter : initial_str_with_globs)
    {
@@ -33,6 +33,7 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_globs)
    re2::StringPiece input(escaped_with_globs);
    re2::StringPiece matched;
    std::ostringstream oss_for_replacing;
+    oss_for_replacing.exceptions(std::ios::failbit);
    size_t current_index = 0;
    while (RE2::FindAndConsume(&input, enum_or_range, &matched))
    {
@@ -45,8 +46,8 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_globs)
        size_t range_end = 0;
        char point;
        std::istringstream iss_range(buffer);
+        iss_range.exceptions(std::ios::failbit);
        iss_range >> range_begin >> point >> point >> range_end;
-        assert(!iss_range.fail());
        bool leading_zeros = buffer[0] == '0';
        size_t num_len = std::to_string(range_end).size();
        if (leading_zeros)
@@ -71,6 +72,7 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_globs)
    oss_for_replacing << escaped_with_globs.substr(current_index);
    std::string almost_res = oss_for_replacing.str();
    std::ostringstream oss_final_processing;
+    oss_final_processing.exceptions(std::ios::failbit);
    for (const auto & letter : almost_res)
    {
        if ((letter == '?') || (letter == '*'))
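For context, a sketch of how the function being touched here is used: a glob is turned into an RE2-compatible pattern, with `{a..b}` ranges expanded into alternations. The concrete input and expected match are assumptions for illustration:

#include <Common/parseGlobs.h>
#include <re2/re2.h>
#include <iostream>

int main()
{
    /// '*' must not cross path separators; '{1..3}' becomes an alternation of the numbers.
    std::string pattern = DB::makeRegexpPatternFromGlobs("data_{1..3}_*.csv");
    std::cout << pattern << '\n';

    RE2 re(pattern);
    std::cout << RE2::FullMatch("data_2_part.csv", re) << '\n';   /// expected to print 1
}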

View File

@@ -0,0 +1,15 @@
#pragma once
#include <Functions/registerFunctions.h>
#include <Formats/registerFormats.h>
inline void tryRegisterFunctions()
{
static struct Register { Register() { DB::registerFunctions(); } } registered;
}
inline void tryRegisterFormats()
{
static struct Register { Register() { DB::registerFormats(); } } registered;
}
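The new header replaces the per-test RegisteredFunctionsState helper (removed just below) with function-local statics, so repeated calls from many test translation units register everything exactly once. A sketch of the intended use in a gtest file, assuming the header path of the file added above; the test body is illustrative:

#include <gtest/gtest.h>
#include <Common/tests/gtest_global_register.h>

TEST(GlobalRegister, IsIdempotent)
{
    tryRegisterFunctions();
    tryRegisterFormats();

    /// Calling again is a no-op: the static Register objects were already constructed.
    tryRegisterFunctions();
    tryRegisterFormats();
}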

View File

@@ -1,18 +0,0 @@
#pragma once
#include <Functions/FunctionFactory.h>
#include <Functions/registerFunctions.h>
struct RegisteredFunctionsState
{
RegisteredFunctionsState()
{
DB::registerFunctions();
}
RegisteredFunctionsState(RegisteredFunctionsState &&) = default;
};
inline void tryRegisterFunctions()
{
static RegisteredFunctionsState registered_functions_state;
}

View File

@@ -165,6 +165,7 @@ TEST(Common, SensitiveDataMasker)
            </rule>
        </query_masking_rules>
    </clickhouse>)END");

    Poco::AutoPtr<Poco::Util::XMLConfiguration> xml_config = new Poco::Util::XMLConfiguration(xml_isteam_bad);
    DB::SensitiveDataMasker masker_xml_based_exception_check(*xml_config, "query_masking_rules");
View File

@@ -52,6 +52,7 @@ int main(int, char **)
        if (x != i)
        {
            std::stringstream s;
+            s.exceptions(std::ios::failbit);
            s << "Failed!, read: " << x << ", expected: " << i;
            throw DB::Exception(s.str(), 0);
        }
View File

@@ -22,6 +22,7 @@ void IMySQLReadPacket::readPayload(ReadBuffer & in, uint8_t & sequence_id)
    if (!payload.eof())
    {
        std::stringstream tmp;
+        tmp.exceptions(std::ios::failbit);
        tmp << "Packet payload is not fully read. Stopped after " << payload.count() << " bytes, while " << payload.available() << " bytes are in buffer.";
        throw Exception(tmp.str(), ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT);
    }
View File

@@ -16,6 +16,7 @@ void IMySQLWritePacket::writePayload(WriteBuffer & buffer, uint8_t & sequence_id)
    if (buf.remainingPayloadSize())
    {
        std::stringstream ss;
+        ss.exceptions(std::ios::failbit);
        ss << "Incomplete payload. Written " << getPayloadSize() - buf.remainingPayloadSize() << " bytes, expected " << getPayloadSize() << " bytes.";
        throw Exception(ss.str(), 0);
    }
View File

@ -374,9 +374,8 @@ class IColumn;
M(Bool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \ M(Bool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \
M(Bool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \ M(Bool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \
M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \ M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
\
M(Bool, deduplicate_blocks_in_dependent_materialized_views, false, "Should deduplicate blocks for materialized views if the block is not a duplicate for the table. Use true to always deduplicate in dependent tables.", 0) \ M(Bool, deduplicate_blocks_in_dependent_materialized_views, false, "Should deduplicate blocks for materialized views if the block is not a duplicate for the table. Use true to always deduplicate in dependent tables.", 0) \
M(Bool, use_compact_format_in_distributed_parts_names, false, "Changes format of directories names for distributed table insert parts.", 0) \ M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \
M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \
M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \ M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \
M(Seconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \ M(Seconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \
@ -385,7 +384,6 @@ class IColumn;
M(Seconds, lock_acquire_timeout, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "How long locking request should wait before failing", 0) \ M(Seconds, lock_acquire_timeout, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "How long locking request should wait before failing", 0) \
M(Bool, materialize_ttl_after_modify, true, "Apply TTL for old data, after ALTER MODIFY TTL query", 0) \ M(Bool, materialize_ttl_after_modify, true, "Apply TTL for old data, after ALTER MODIFY TTL query", 0) \
M(String, function_implementation, "", "Choose function implementation for specific target or variant (experimental). If empty enable all of them.", 0) \ M(String, function_implementation, "", "Choose function implementation for specific target or variant (experimental). If empty enable all of them.", 0) \
\
M(Bool, allow_experimental_geo_types, false, "Allow geo data types such as Point, Ring, Polygon, MultiPolygon", 0) \ M(Bool, allow_experimental_geo_types, false, "Allow geo data types such as Point, Ring, Polygon, MultiPolygon", 0) \
M(Bool, allow_experimental_bigint_types, false, "Allow Int128, Int256, UInt256 and Decimal256 types", 0) \ M(Bool, allow_experimental_bigint_types, false, "Allow Int128, Int256, UInt256 and Decimal256 types", 0) \
M(Bool, data_type_default_nullable, false, "Data types without NULL or NOT NULL will make Nullable", 0) \ M(Bool, data_type_default_nullable, false, "Data types without NULL or NOT NULL will make Nullable", 0) \
@ -394,20 +392,18 @@ class IColumn;
M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \
M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \
M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0) \
M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \
\ \
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \ /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
\ \
M(Bool, partial_merge_join, false, "Obsolete. Use join_algorithm='prefer_partial_merge' instead.", 0) \
M(UInt64, max_memory_usage_for_all_queries, 0, "Obsolete. Will be removed after 2020-10-20", 0) \
M(UInt64, multiple_joins_rewriter_version, 0, "Obsolete setting, does nothing. Will be removed after 2021-03-31", 0) \ M(UInt64, multiple_joins_rewriter_version, 0, "Obsolete setting, does nothing. Will be removed after 2021-03-31", 0) \
\
M(Bool, force_optimize_skip_unused_shards_no_nested, false, "Obsolete setting, does nothing. Will be removed after 2020-12-01. Use force_optimize_skip_unused_shards_nesting instead.", 0) \
M(Bool, experimental_use_processors, true, "Obsolete setting, does nothing. Will be removed after 2020-11-29.", 0) \ M(Bool, experimental_use_processors, true, "Obsolete setting, does nothing. Will be removed after 2020-11-29.", 0) \
M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \ M(Bool, force_optimize_skip_unused_shards_no_nested, false, "Obsolete setting, does nothing. Will be removed after 2020-12-01. Use force_optimize_skip_unused_shards_nesting instead.", 0) \
M(Bool, enable_debug_queries, false, "Enabled debug queries, but now is obsolete", 0) \
M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing. Will be removed after 2021-02-12", 0) \ M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing. Will be removed after 2021-02-12", 0) \
M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0) \
M(Bool, enable_debug_queries, false, "Enabled debug queries, but now is obsolete", 0)
// End of COMMON_SETTINGS // End of COMMON_SETTINGS
// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below. // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below.

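Note on the settings hunk above: the entries under the "Obsolete settings" comment stay in the COMMON_SETTINGS list so that old configs and clients keep being accepted, they simply have no effect any more. Below is a hypothetical, much-reduced sketch of how such an M(...) list expands into plain struct members; the real SettingsTraits machinery in Settings.h additionally generates change tracking and (de)serialization, so treat this only as an illustration of the macro pattern.

using Bool = bool;

#define APPLY_FOR_EXAMPLE_SETTINGS(M) \
    M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0) \
    M(Bool, enable_debug_queries, false, "Enabled debug queries, but now is obsolete", 0)

#define DECLARE_SETTING_MEMBER(TYPE, NAME, DEFAULT, DESCRIPTION, FLAGS) TYPE NAME = DEFAULT;

struct ExampleSettings
{
    APPLY_FOR_EXAMPLE_SETTINGS(DECLARE_SETTING_MEMBER)
};
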
View File

@ -96,7 +96,7 @@ struct SortCursorImpl
: column_desc.column_number; : column_desc.column_number;
sort_columns.push_back(columns[column_number].get()); sort_columns.push_back(columns[column_number].get());
need_collation[j] = desc[j].collator != nullptr && typeid_cast<const ColumnString *>(sort_columns.back()); /// TODO Nullable(String) need_collation[j] = desc[j].collator != nullptr && sort_columns.back()->isCollationSupported(); /// TODO Nullable(String)
has_collation |= need_collation[j]; has_collation |= need_collation[j];
} }
@ -201,10 +201,7 @@ struct SortCursorWithCollation : SortCursorHelper<SortCursorWithCollation>
int nulls_direction = desc.nulls_direction; int nulls_direction = desc.nulls_direction;
int res; int res;
if (impl->need_collation[i]) if (impl->need_collation[i])
{ res = impl->sort_columns[i]->compareAtWithCollation(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[i]), nulls_direction, *impl->desc[i].collator);
const ColumnString & column_string = assert_cast<const ColumnString &>(*impl->sort_columns[i]);
res = column_string.compareAtWithCollation(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[i]), *impl->desc[i].collator);
}
else else
res = impl->sort_columns[i]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[i]), nulls_direction); res = impl->sort_columns[i]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[i]), nulls_direction);

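Note on the SortCursor hunk above: the comparison no longer special-cases ColumnString via typeid_cast; the cursor asks the column whether it supports collation and dispatches through the IColumn interface, which is what allows Nullable, LowCardinality, Array and Tuple columns containing strings to be collated as well. A minimal sketch of that dispatch, assuming the isCollationSupported() and compareAtWithCollation() methods exactly as they appear in the hunk (ClickHouse internal headers):

#include <Columns/IColumn.h>
#include <Common/Collator.h>

int compareRows(const DB::IColumn & lhs, size_t lhs_pos,
                const DB::IColumn & rhs, size_t rhs_pos,
                int nulls_direction, const Collator * collator)
{
    /// Collated comparison only when a collator is set and the column type supports it.
    if (collator && lhs.isCollationSupported())
        return lhs.compareAtWithCollation(lhs_pos, rhs_pos, rhs, nulls_direction, *collator);
    /// Otherwise fall back to the ordinary per-column comparison.
    return lhs.compareAt(lhs_pos, rhs_pos, rhs, nulls_direction);
}
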
View File

@ -61,6 +61,7 @@ struct SortColumnDescription
std::string dump() const std::string dump() const
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
ss << column_name << ":" << column_number << ":dir " << direction << "nulls " << nulls_direction; ss << column_name << ":" << column_number << ":dir " << direction << "nulls " << nulls_direction;
return ss.str(); return ss.str();
} }

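The ss.exceptions(std::ios::failbit) calls that appear throughout this diff all follow the same idea: a stream that fails silently just yields an empty or truncated string, while a stream with failbit armed throws std::ios_base::failure at the point of failure. A self-contained illustration in standard C++ (not ClickHouse code):

#include <iostream>
#include <sstream>

int main()
{
    std::stringstream ss;
    ss.exceptions(std::ios::failbit);     // any failed stream operation now throws std::ios_base::failure
    ss << "dir " << 1 << " nulls " << -1; // formatting similar to the dump() above
    std::cout << ss.str() << '\n';

    try
    {
        int x;
        ss >> x;                          // "dir" is not a number: failbit is set and an exception is thrown
    }
    catch (const std::ios_base::failure & e)
    {
        std::cout << "caught: " << e.what() << '\n';
    }
    return 0;
}
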
View File

@ -60,6 +60,7 @@ void CheckConstraintsBlockOutputStream::write(const Block & block)
if (!value) if (!value)
{ {
std::stringstream exception_message; std::stringstream exception_message;
exception_message.exceptions(std::ios::failbit);
exception_message << "Constraint " << backQuote(constraint_ptr->name) exception_message << "Constraint " << backQuote(constraint_ptr->name)
<< " for table " << table_id.getNameForLogs() << " for table " << table_id.getNameForLogs()
@ -87,6 +88,7 @@ void CheckConstraintsBlockOutputStream::write(const Block & block)
Names related_columns = constraint_expr->getRequiredColumns(); Names related_columns = constraint_expr->getRequiredColumns();
std::stringstream exception_message; std::stringstream exception_message;
exception_message.exceptions(std::ios::failbit);
exception_message << "Constraint " << backQuote(constraint_ptr->name) exception_message << "Constraint " << backQuote(constraint_ptr->name)
<< " for table " << table_id.getNameForLogs() << " for table " << table_id.getNameForLogs()

View File

@ -360,6 +360,7 @@ Block IBlockInputStream::getExtremes()
String IBlockInputStream::getTreeID() const String IBlockInputStream::getTreeID() const
{ {
std::stringstream s; std::stringstream s;
s.exceptions(std::ios::failbit);
s << getName(); s << getName();
if (!children.empty()) if (!children.empty())

View File

@ -5,6 +5,7 @@
#include <Processors/Pipe.h> #include <Processors/Pipe.h>
#include <Processors/Sources/SourceFromSingleChunk.h> #include <Processors/Sources/SourceFromSingleChunk.h>
#include <Storages/IStorage.h> #include <Storages/IStorage.h>
#include <Storages/SelectQueryInfo.h>
#include <Interpreters/castColumn.h> #include <Interpreters/castColumn.h>
#include <Interpreters/Cluster.h> #include <Interpreters/Cluster.h>
#include <Interpreters/InternalTextLogsQueue.h> #include <Interpreters/InternalTextLogsQueue.h>
@ -314,6 +315,8 @@ void RemoteQueryExecutor::sendScalars()
void RemoteQueryExecutor::sendExternalTables() void RemoteQueryExecutor::sendExternalTables()
{ {
SelectQueryInfo query_info;
size_t count = multiplexed_connections->size(); size_t count = multiplexed_connections->size();
{ {
@ -328,11 +331,12 @@ void RemoteQueryExecutor::sendExternalTables()
{ {
StoragePtr cur = table.second; StoragePtr cur = table.second;
auto metadata_snapshot = cur->getInMemoryMetadataPtr(); auto metadata_snapshot = cur->getInMemoryMetadataPtr();
QueryProcessingStage::Enum read_from_table_stage = cur->getQueryProcessingStage(context); QueryProcessingStage::Enum read_from_table_stage = cur->getQueryProcessingStage(
context, QueryProcessingStage::Complete, query_info);
Pipe pipe = cur->read( Pipe pipe = cur->read(
metadata_snapshot->getColumns().getNamesOfPhysical(), metadata_snapshot->getColumns().getNamesOfPhysical(),
metadata_snapshot, {}, context, metadata_snapshot, query_info, context,
read_from_table_stage, DEFAULT_BLOCK_SIZE, 1); read_from_table_stage, DEFAULT_BLOCK_SIZE, 1);
auto data = std::make_unique<ExternalTableData>(); auto data = std::make_unique<ExternalTableData>();

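Note on the RemoteQueryExecutor hunk above: IStorage::getQueryProcessingStage() and IStorage::read() now always receive a SelectQueryInfo, so callers that have no real SELECT, like this external-tables path, pass a default-constructed one. A sketch of that call pattern under the signatures shown in the hunk (ClickHouse internal headers, paths assumed):

#include <Core/Defines.h>
#include <Processors/Pipe.h>
#include <Storages/IStorage.h>
#include <Storages/SelectQueryInfo.h>

DB::Pipe readWholeTable(const DB::StoragePtr & table, const DB::Context & context)
{
    DB::SelectQueryInfo query_info;   /// empty: there is no actual query behind this read
    auto metadata_snapshot = table->getInMemoryMetadataPtr();
    auto stage = table->getQueryProcessingStage(context, DB::QueryProcessingStage::Complete, query_info);
    return table->read(
        metadata_snapshot->getColumns().getNamesOfPhysical(),
        metadata_snapshot, query_info, context,
        stage, DEFAULT_BLOCK_SIZE, /* num_streams = */ 1);
}
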
View File

@ -33,6 +33,7 @@ static const std::vector<String> supported_functions{"any", "anyLast", "min",
String DataTypeCustomSimpleAggregateFunction::getName() const String DataTypeCustomSimpleAggregateFunction::getName() const
{ {
std::stringstream stream; std::stringstream stream;
stream.exceptions(std::ios::failbit);
stream << "SimpleAggregateFunction(" << function->getName(); stream << "SimpleAggregateFunction(" << function->getName();
if (!parameters.empty()) if (!parameters.empty())

View File

@ -30,6 +30,7 @@ template <typename T>
std::string DataTypeDecimal<T>::doGetName() const std::string DataTypeDecimal<T>::doGetName() const
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
ss << "Decimal(" << this->precision << ", " << this->scale << ")"; ss << "Decimal(" << this->precision << ", " << this->scale << ")";
return ss.str(); return ss.str();
} }

View File

@ -95,6 +95,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
if (!create) if (!create)
{ {
std::ostringstream query_stream; std::ostringstream query_stream;
query_stream.exceptions(std::ios::failbit);
formatAST(*query, query_stream, true); formatAST(*query, query_stream, true);
throw Exception("Query '" + query_stream.str() + "' is not CREATE query", ErrorCodes::LOGICAL_ERROR); throw Exception("Query '" + query_stream.str() + "' is not CREATE query", ErrorCodes::LOGICAL_ERROR);
} }
@ -121,6 +122,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
create->table = TABLE_WITH_UUID_NAME_PLACEHOLDER; create->table = TABLE_WITH_UUID_NAME_PLACEHOLDER;
std::ostringstream statement_stream; std::ostringstream statement_stream;
statement_stream.exceptions(std::ios::failbit);
formatAST(*create, statement_stream, false); formatAST(*create, statement_stream, false);
statement_stream << '\n'; statement_stream << '\n';
return statement_stream.str(); return statement_stream.str();

View File

@ -128,6 +128,7 @@ static String checkVariableAndGetVersion(const mysqlxx::Pool::Entry & connection
bool first = true; bool first = true;
std::stringstream error_message; std::stringstream error_message;
error_message.exceptions(std::ios::failbit);
error_message << "Illegal MySQL variables, the MaterializeMySQL engine requires "; error_message << "Illegal MySQL variables, the MaterializeMySQL engine requires ";
for (const auto & [variable_name, variable_error_message] : variables_error_message) for (const auto & [variable_name, variable_error_message] : variables_error_message)
{ {
@ -239,6 +240,7 @@ static inline BlockOutputStreamPtr getTableOutput(const String & database_name,
const StoragePtr & storage = DatabaseCatalog::instance().getTable(StorageID(database_name, table_name), query_context); const StoragePtr & storage = DatabaseCatalog::instance().getTable(StorageID(database_name, table_name), query_context);
std::stringstream insert_columns_str; std::stringstream insert_columns_str;
insert_columns_str.exceptions(std::ios::failbit);
const StorageInMemoryMetadata & storage_metadata = storage->getInMemoryMetadata(); const StorageInMemoryMetadata & storage_metadata = storage->getInMemoryMetadata();
const ColumnsDescription & storage_columns = storage_metadata.getColumns(); const ColumnsDescription & storage_columns = storage_metadata.getColumns();
const NamesAndTypesList & insert_columns_names = insert_materialized ? storage_columns.getAllPhysical() : storage_columns.getOrdinary(); const NamesAndTypesList & insert_columns_names = insert_materialized ? storage_columns.getAllPhysical() : storage_columns.getOrdinary();
@ -330,6 +332,7 @@ std::optional<MaterializeMetadata> MaterializeMySQLSyncThread::prepareSynchroniz
const auto & position_message = [&]() const auto & position_message = [&]()
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
position.dump(ss); position.dump(ss);
return ss.str(); return ss.str();
}; };
@ -372,6 +375,7 @@ void MaterializeMySQLSyncThread::flushBuffersData(Buffers & buffers, Materialize
const auto & position_message = [&]() const auto & position_message = [&]()
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
client.getPosition().dump(ss); client.getPosition().dump(ss);
return ss.str(); return ss.str();
}; };
@ -643,6 +647,7 @@ void MaterializeMySQLSyncThread::onEvent(Buffers & buffers, const BinlogEventPtr
const auto & dump_event_message = [&]() const auto & dump_event_message = [&]()
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
receive_event->dump(ss); receive_event->dump(ss);
return ss.str(); return ss.str();
}; };

View File

@ -16,6 +16,8 @@
#include <common/StringRef.h> #include <common/StringRef.h>
#include <ext/bit_cast.h> #include <ext/bit_cast.h>
#include <ext/map.h> #include <ext/map.h>
#include <ext/range.h>
#include <ext/size.h>
#include <ext/scope_guard.h> #include <ext/scope_guard.h>
#include "DictionaryStructure.h" #include "DictionaryStructure.h"
#include "IDictionary.h" #include "IDictionary.h"

View File

@ -231,6 +231,7 @@ std::string DictionaryStructure::getKeyDescription() const
return "UInt64"; return "UInt64";
std::ostringstream out; std::ostringstream out;
out.exceptions(std::ios::failbit);
out << '('; out << '(';

View File

@ -19,6 +19,7 @@ static std::string configurationToString(const DictionaryConfigurationPtr & conf
{ {
const Poco::Util::XMLConfiguration * xml_config = dynamic_cast<const Poco::Util::XMLConfiguration *>(config.get()); const Poco::Util::XMLConfiguration * xml_config = dynamic_cast<const Poco::Util::XMLConfiguration *>(config.get());
std::ostringstream oss; std::ostringstream oss;
oss.exceptions(std::ios::failbit);
xml_config->save(oss); xml_config->save(oss);
return oss.str(); return oss.str();
} }

View File

@ -333,150 +333,6 @@ void FormatFactory::registerFileSegmentationEngine(const String & name, FileSegm
target = std::move(file_segmentation_engine); target = std::move(file_segmentation_engine);
} }
/// File Segmentation Engines for parallel reading
void registerFileSegmentationEngineTabSeparated(FormatFactory & factory);
void registerFileSegmentationEngineCSV(FormatFactory & factory);
void registerFileSegmentationEngineJSONEachRow(FormatFactory & factory);
void registerFileSegmentationEngineRegexp(FormatFactory & factory);
void registerFileSegmentationEngineJSONAsString(FormatFactory & factory);
void registerFileSegmentationEngineLineAsString(FormatFactory & factory);
/// Formats for both input/output.
void registerInputFormatNative(FormatFactory & factory);
void registerOutputFormatNative(FormatFactory & factory);
void registerInputFormatProcessorNative(FormatFactory & factory);
void registerOutputFormatProcessorNative(FormatFactory & factory);
void registerInputFormatProcessorRowBinary(FormatFactory & factory);
void registerOutputFormatProcessorRowBinary(FormatFactory & factory);
void registerInputFormatProcessorTabSeparated(FormatFactory & factory);
void registerOutputFormatProcessorTabSeparated(FormatFactory & factory);
void registerInputFormatProcessorValues(FormatFactory & factory);
void registerOutputFormatProcessorValues(FormatFactory & factory);
void registerInputFormatProcessorCSV(FormatFactory & factory);
void registerOutputFormatProcessorCSV(FormatFactory & factory);
void registerInputFormatProcessorTSKV(FormatFactory & factory);
void registerOutputFormatProcessorTSKV(FormatFactory & factory);
void registerInputFormatProcessorJSONEachRow(FormatFactory & factory);
void registerOutputFormatProcessorJSONEachRow(FormatFactory & factory);
void registerInputFormatProcessorJSONCompactEachRow(FormatFactory & factory);
void registerOutputFormatProcessorJSONCompactEachRow(FormatFactory & factory);
void registerInputFormatProcessorProtobuf(FormatFactory & factory);
void registerOutputFormatProcessorProtobuf(FormatFactory & factory);
void registerInputFormatProcessorTemplate(FormatFactory & factory);
void registerOutputFormatProcessorTemplate(FormatFactory & factory);
void registerInputFormatProcessorMsgPack(FormatFactory & factory);
void registerOutputFormatProcessorMsgPack(FormatFactory & factory);
void registerInputFormatProcessorORC(FormatFactory & factory);
void registerOutputFormatProcessorORC(FormatFactory & factory);
void registerInputFormatProcessorParquet(FormatFactory & factory);
void registerOutputFormatProcessorParquet(FormatFactory & factory);
void registerInputFormatProcessorArrow(FormatFactory & factory);
void registerOutputFormatProcessorArrow(FormatFactory & factory);
void registerInputFormatProcessorAvro(FormatFactory & factory);
void registerOutputFormatProcessorAvro(FormatFactory & factory);
void registerInputFormatProcessorRawBLOB(FormatFactory & factory);
void registerOutputFormatProcessorRawBLOB(FormatFactory & factory);
/// Output only (presentational) formats.
void registerOutputFormatNull(FormatFactory & factory);
void registerOutputFormatProcessorPretty(FormatFactory & factory);
void registerOutputFormatProcessorPrettyCompact(FormatFactory & factory);
void registerOutputFormatProcessorPrettySpace(FormatFactory & factory);
void registerOutputFormatProcessorVertical(FormatFactory & factory);
void registerOutputFormatProcessorJSON(FormatFactory & factory);
void registerOutputFormatProcessorJSONCompact(FormatFactory & factory);
void registerOutputFormatProcessorJSONEachRowWithProgress(FormatFactory & factory);
void registerOutputFormatProcessorXML(FormatFactory & factory);
void registerOutputFormatProcessorODBCDriver2(FormatFactory & factory);
void registerOutputFormatProcessorNull(FormatFactory & factory);
void registerOutputFormatProcessorMySQLWire(FormatFactory & factory);
void registerOutputFormatProcessorMarkdown(FormatFactory & factory);
void registerOutputFormatProcessorPostgreSQLWire(FormatFactory & factory);
/// Input only formats.
void registerInputFormatProcessorRegexp(FormatFactory & factory);
void registerInputFormatProcessorJSONAsString(FormatFactory & factory);
void registerInputFormatProcessorLineAsString(FormatFactory & factory);
void registerInputFormatProcessorCapnProto(FormatFactory & factory);
FormatFactory::FormatFactory()
{
registerFileSegmentationEngineTabSeparated(*this);
registerFileSegmentationEngineCSV(*this);
registerFileSegmentationEngineJSONEachRow(*this);
registerFileSegmentationEngineRegexp(*this);
registerFileSegmentationEngineJSONAsString(*this);
registerFileSegmentationEngineLineAsString(*this);
registerInputFormatNative(*this);
registerOutputFormatNative(*this);
registerInputFormatProcessorNative(*this);
registerOutputFormatProcessorNative(*this);
registerInputFormatProcessorRowBinary(*this);
registerOutputFormatProcessorRowBinary(*this);
registerInputFormatProcessorTabSeparated(*this);
registerOutputFormatProcessorTabSeparated(*this);
registerInputFormatProcessorValues(*this);
registerOutputFormatProcessorValues(*this);
registerInputFormatProcessorCSV(*this);
registerOutputFormatProcessorCSV(*this);
registerInputFormatProcessorTSKV(*this);
registerOutputFormatProcessorTSKV(*this);
registerInputFormatProcessorJSONEachRow(*this);
registerOutputFormatProcessorJSONEachRow(*this);
registerInputFormatProcessorJSONCompactEachRow(*this);
registerOutputFormatProcessorJSONCompactEachRow(*this);
registerInputFormatProcessorProtobuf(*this);
registerOutputFormatProcessorProtobuf(*this);
registerInputFormatProcessorTemplate(*this);
registerOutputFormatProcessorTemplate(*this);
registerInputFormatProcessorMsgPack(*this);
registerOutputFormatProcessorMsgPack(*this);
registerInputFormatProcessorRawBLOB(*this);
registerOutputFormatProcessorRawBLOB(*this);
#if !defined(ARCADIA_BUILD)
registerInputFormatProcessorORC(*this);
registerOutputFormatProcessorORC(*this);
registerInputFormatProcessorParquet(*this);
registerOutputFormatProcessorParquet(*this);
registerInputFormatProcessorArrow(*this);
registerOutputFormatProcessorArrow(*this);
registerInputFormatProcessorAvro(*this);
registerOutputFormatProcessorAvro(*this);
#endif
registerOutputFormatNull(*this);
registerOutputFormatProcessorPretty(*this);
registerOutputFormatProcessorPrettyCompact(*this);
registerOutputFormatProcessorPrettySpace(*this);
registerOutputFormatProcessorVertical(*this);
registerOutputFormatProcessorJSON(*this);
registerOutputFormatProcessorJSONCompact(*this);
registerOutputFormatProcessorJSONEachRowWithProgress(*this);
registerOutputFormatProcessorXML(*this);
registerOutputFormatProcessorODBCDriver2(*this);
registerOutputFormatProcessorNull(*this);
registerOutputFormatProcessorMySQLWire(*this);
registerOutputFormatProcessorMarkdown(*this);
registerOutputFormatProcessorPostgreSQLWire(*this);
registerInputFormatProcessorRegexp(*this);
registerInputFormatProcessorJSONAsString(*this);
registerInputFormatProcessorLineAsString(*this);
#if !defined(ARCADIA_BUILD)
registerInputFormatProcessorCapnProto(*this);
#endif
}
FormatFactory & FormatFactory::instance() FormatFactory & FormatFactory::instance()
{ {

View File

@ -96,7 +96,6 @@ private:
using FormatsDictionary = std::unordered_map<String, Creators>; using FormatsDictionary = std::unordered_map<String, Creators>;
public: public:
static FormatFactory & instance(); static FormatFactory & instance();
BlockInputStreamPtr getInput( BlockInputStreamPtr getInput(
@ -137,8 +136,6 @@ public:
private: private:
FormatsDictionary dict; FormatsDictionary dict;
FormatFactory();
const Creators & getCreators(const String & name) const; const Creators & getCreators(const String & name) const;
}; };

View File

@ -0,0 +1,160 @@
#if !defined(ARCADIA_BUILD)
# include <Common/config.h>
#endif
#include <Formats/FormatFactory.h>
namespace DB
{
/// File Segmentation Engines for parallel reading
void registerFileSegmentationEngineTabSeparated(FormatFactory & factory);
void registerFileSegmentationEngineCSV(FormatFactory & factory);
void registerFileSegmentationEngineJSONEachRow(FormatFactory & factory);
void registerFileSegmentationEngineRegexp(FormatFactory & factory);
void registerFileSegmentationEngineJSONAsString(FormatFactory & factory);
void registerFileSegmentationEngineLineAsString(FormatFactory & factory);
/// Formats for both input/output.
void registerInputFormatNative(FormatFactory & factory);
void registerOutputFormatNative(FormatFactory & factory);
void registerInputFormatProcessorNative(FormatFactory & factory);
void registerOutputFormatProcessorNative(FormatFactory & factory);
void registerInputFormatProcessorRowBinary(FormatFactory & factory);
void registerOutputFormatProcessorRowBinary(FormatFactory & factory);
void registerInputFormatProcessorTabSeparated(FormatFactory & factory);
void registerOutputFormatProcessorTabSeparated(FormatFactory & factory);
void registerInputFormatProcessorValues(FormatFactory & factory);
void registerOutputFormatProcessorValues(FormatFactory & factory);
void registerInputFormatProcessorCSV(FormatFactory & factory);
void registerOutputFormatProcessorCSV(FormatFactory & factory);
void registerInputFormatProcessorTSKV(FormatFactory & factory);
void registerOutputFormatProcessorTSKV(FormatFactory & factory);
void registerInputFormatProcessorJSONEachRow(FormatFactory & factory);
void registerOutputFormatProcessorJSONEachRow(FormatFactory & factory);
void registerInputFormatProcessorJSONCompactEachRow(FormatFactory & factory);
void registerOutputFormatProcessorJSONCompactEachRow(FormatFactory & factory);
void registerInputFormatProcessorProtobuf(FormatFactory & factory);
void registerOutputFormatProcessorProtobuf(FormatFactory & factory);
void registerInputFormatProcessorTemplate(FormatFactory & factory);
void registerOutputFormatProcessorTemplate(FormatFactory & factory);
void registerInputFormatProcessorMsgPack(FormatFactory & factory);
void registerOutputFormatProcessorMsgPack(FormatFactory & factory);
void registerInputFormatProcessorORC(FormatFactory & factory);
void registerOutputFormatProcessorORC(FormatFactory & factory);
void registerInputFormatProcessorParquet(FormatFactory & factory);
void registerOutputFormatProcessorParquet(FormatFactory & factory);
void registerInputFormatProcessorArrow(FormatFactory & factory);
void registerOutputFormatProcessorArrow(FormatFactory & factory);
void registerInputFormatProcessorAvro(FormatFactory & factory);
void registerOutputFormatProcessorAvro(FormatFactory & factory);
void registerInputFormatProcessorRawBLOB(FormatFactory & factory);
void registerOutputFormatProcessorRawBLOB(FormatFactory & factory);
/// Output only (presentational) formats.
void registerOutputFormatNull(FormatFactory & factory);
void registerOutputFormatProcessorPretty(FormatFactory & factory);
void registerOutputFormatProcessorPrettyCompact(FormatFactory & factory);
void registerOutputFormatProcessorPrettySpace(FormatFactory & factory);
void registerOutputFormatProcessorVertical(FormatFactory & factory);
void registerOutputFormatProcessorJSON(FormatFactory & factory);
void registerOutputFormatProcessorJSONCompact(FormatFactory & factory);
void registerOutputFormatProcessorJSONEachRowWithProgress(FormatFactory & factory);
void registerOutputFormatProcessorXML(FormatFactory & factory);
void registerOutputFormatProcessorODBCDriver2(FormatFactory & factory);
void registerOutputFormatProcessorNull(FormatFactory & factory);
void registerOutputFormatProcessorMySQLWire(FormatFactory & factory);
void registerOutputFormatProcessorMarkdown(FormatFactory & factory);
void registerOutputFormatProcessorPostgreSQLWire(FormatFactory & factory);
/// Input only formats.
void registerInputFormatProcessorRegexp(FormatFactory & factory);
void registerInputFormatProcessorJSONAsString(FormatFactory & factory);
void registerInputFormatProcessorLineAsString(FormatFactory & factory);
void registerInputFormatProcessorCapnProto(FormatFactory & factory);
void registerFormats()
{
auto & factory = FormatFactory::instance();
registerFileSegmentationEngineTabSeparated(factory);
registerFileSegmentationEngineCSV(factory);
registerFileSegmentationEngineJSONEachRow(factory);
registerFileSegmentationEngineRegexp(factory);
registerFileSegmentationEngineJSONAsString(factory);
registerFileSegmentationEngineLineAsString(factory);
registerInputFormatNative(factory);
registerOutputFormatNative(factory);
registerInputFormatProcessorNative(factory);
registerOutputFormatProcessorNative(factory);
registerInputFormatProcessorRowBinary(factory);
registerOutputFormatProcessorRowBinary(factory);
registerInputFormatProcessorTabSeparated(factory);
registerOutputFormatProcessorTabSeparated(factory);
registerInputFormatProcessorValues(factory);
registerOutputFormatProcessorValues(factory);
registerInputFormatProcessorCSV(factory);
registerOutputFormatProcessorCSV(factory);
registerInputFormatProcessorTSKV(factory);
registerOutputFormatProcessorTSKV(factory);
registerInputFormatProcessorJSONEachRow(factory);
registerOutputFormatProcessorJSONEachRow(factory);
registerInputFormatProcessorJSONCompactEachRow(factory);
registerOutputFormatProcessorJSONCompactEachRow(factory);
registerInputFormatProcessorProtobuf(factory);
registerOutputFormatProcessorProtobuf(factory);
registerInputFormatProcessorTemplate(factory);
registerOutputFormatProcessorTemplate(factory);
registerInputFormatProcessorMsgPack(factory);
registerOutputFormatProcessorMsgPack(factory);
registerInputFormatProcessorRawBLOB(factory);
registerOutputFormatProcessorRawBLOB(factory);
#if !defined(ARCADIA_BUILD)
registerInputFormatProcessorORC(factory);
registerOutputFormatProcessorORC(factory);
registerInputFormatProcessorParquet(factory);
registerOutputFormatProcessorParquet(factory);
registerInputFormatProcessorArrow(factory);
registerOutputFormatProcessorArrow(factory);
registerInputFormatProcessorAvro(factory);
registerOutputFormatProcessorAvro(factory);
#endif
registerOutputFormatNull(factory);
registerOutputFormatProcessorPretty(factory);
registerOutputFormatProcessorPrettyCompact(factory);
registerOutputFormatProcessorPrettySpace(factory);
registerOutputFormatProcessorVertical(factory);
registerOutputFormatProcessorJSON(factory);
registerOutputFormatProcessorJSONCompact(factory);
registerOutputFormatProcessorJSONEachRowWithProgress(factory);
registerOutputFormatProcessorXML(factory);
registerOutputFormatProcessorODBCDriver2(factory);
registerOutputFormatProcessorNull(factory);
registerOutputFormatProcessorMySQLWire(factory);
registerOutputFormatProcessorMarkdown(factory);
registerOutputFormatProcessorPostgreSQLWire(factory);
registerInputFormatProcessorRegexp(factory);
registerInputFormatProcessorJSONAsString(factory);
registerInputFormatProcessorLineAsString(factory);
#if !defined(ARCADIA_BUILD)
registerInputFormatProcessorCapnProto(factory);
#endif
}
}

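With the FormatFactory constructor emptied out, format registration becomes an explicit step: every entry point has to call registerFormats() once before touching the factory. A sketch of the resulting usage (the header path Formats/registerFormats.h is an assumption based on the new files in this diff):

#include <Formats/FormatFactory.h>
#include <Formats/registerFormats.h>   // assumed path; declares DB::registerFormats()

int mainImpl()
{
    DB::registerFormats();                          /// one-time registration of every format listed above
    auto & factory = DB::FormatFactory::instance(); /// safe to use only after registerFormats()
    (void)factory;                                  /// ... factory.getInput()/getOutputFormat() from here on
    return 0;
}
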
View File

@ -0,0 +1,9 @@
#pragma once
namespace DB
{
void registerFormats();
}

View File

@ -22,6 +22,7 @@ SRCS(
ProtobufReader.cpp ProtobufReader.cpp
ProtobufSchemas.cpp ProtobufSchemas.cpp
ProtobufWriter.cpp ProtobufWriter.cpp
registerFormats.cpp
verbosePrintString.cpp verbosePrintString.cpp
) )

View File

@ -12,7 +12,7 @@
#include <Functions/FunctionFactory.h> #include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h> #include <Functions/FunctionHelpers.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <IO/WriteBufferFromOStream.h> #include <IO/WriteBufferFromString.h>
#define STATS_ENABLE_STDVEC_WRAPPERS #define STATS_ENABLE_STDVEC_WRAPPERS
#include <stats.hpp> #include <stats.hpp>
@ -139,10 +139,8 @@ Variants bayesian_ab_test(String distribution, PODArray<Float64> & xs, PODArray<
String convertToJson(const PODArray<String> & variant_names, const Variants & variants) String convertToJson(const PODArray<String> & variant_names, const Variants & variants)
{ {
FormatSettings settings; FormatSettings settings;
std::stringstream s;
{ WriteBufferFromOwnString buf;
WriteBufferFromOStream buf(s);
writeCString("{\"data\":[", buf); writeCString("{\"data\":[", buf);
for (size_t i = 0; i < variants.size(); ++i) for (size_t i = 0; i < variants.size(); ++i)
@ -158,12 +156,12 @@ String convertToJson(const PODArray<String> & variant_names, const Variants & va
writeCString(",\"to_be_best\":", buf); writeCString(",\"to_be_best\":", buf);
writeText(variants[i].best, buf); writeText(variants[i].best, buf);
writeCString("}", buf); writeCString("}", buf);
if (i != variant_names.size() -1) writeCString(",", buf); if (i != variant_names.size() -1)
writeCString(",", buf);
} }
writeCString("]}", buf); writeCString("]}", buf);
}
return s.str(); return buf.str();
} }
class FunctionBayesAB : public IFunction class FunctionBayesAB : public IFunction

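Side note on the convertToJson rewrite above: WriteBufferFromOwnString owns its target string, so the intermediate std::stringstream, the WriteBufferFromOStream wrapper and the extra scope that forced a flush all disappear. A minimal sketch of the pattern, using only the ClickHouse IO helpers already included in the hunk:

#include <string>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>

std::string toJson(double best)
{
    DB::WriteBufferFromOwnString buf;
    DB::writeCString("{\"to_be_best\":", buf);
    DB::writeText(best, buf);
    DB::writeCString("}", buf);
    return buf.str();   /// no explicit flush: the buffer writes straight into its own string
}
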
View File

@ -10,39 +10,44 @@ Variants test_bayesab(std::string dist, PODArray<Float64> xs, PODArray<Float64>
{ {
Variants variants; Variants variants;
std::cout << std::fixed; //std::cout << std::fixed;
if (dist == "beta") if (dist == "beta")
{ {
std::cout << dist << "\nclicks: "; /* std::cout << dist << "\nclicks: ";
for (auto x : xs) std::cout << x << " "; for (auto x : xs)
std::cout << x << " ";
std::cout <<"\tconversions: "; std::cout <<"\tconversions: ";
for (auto y : ys) std::cout << y << " "; for (auto y : ys)
std::cout << y << " ";
std::cout << "\n"; std::cout << "\n";*/
variants = bayesian_ab_test<true>(dist, xs, ys); variants = bayesian_ab_test<true>(dist, xs, ys);
} }
else if (dist == "gamma") else if (dist == "gamma")
{ {
std::cout << dist << "\nclicks: "; /* std::cout << dist << "\nclicks: ";
for (auto x : xs) std::cout << x << " "; for (auto x : xs)
std::cout << x << " ";
std::cout <<"\tcost: "; std::cout <<"\tcost: ";
for (auto y : ys) std::cout << y << " "; for (auto y : ys)
std::cout << y << " ";
std::cout << "\n";*/
std::cout << "\n";
variants = bayesian_ab_test<true>(dist, xs, ys); variants = bayesian_ab_test<true>(dist, xs, ys);
} }
for (size_t i = 0; i < variants.size(); ++i) /* for (size_t i = 0; i < variants.size(); ++i)
std::cout << i << " beats 0: " << variants[i].beats_control << std::endl; std::cout << i << " beats 0: " << variants[i].beats_control << std::endl;
for (size_t i = 0; i < variants.size(); ++i) for (size_t i = 0; i < variants.size(); ++i)
std::cout << i << " to be best: " << variants[i].best << std::endl; std::cout << i << " to be best: " << variants[i].best << std::endl;
std::cout << convertToJson({"0", "1", "2"}, variants) << std::endl; std::cout << convertToJson({"0", "1", "2"}, variants) << std::endl;
*/
Float64 max_val = 0.0, min_val = 2.0; Float64 max_val = 0.0, min_val = 2.0;
for (size_t i = 0; i < variants.size(); ++i) for (size_t i = 0; i < variants.size(); ++i)
{ {

View File

@ -20,6 +20,7 @@
# include <Poco/Net/PrivateKeyPassphraseHandler.h> # include <Poco/Net/PrivateKeyPassphraseHandler.h>
# include <Poco/Net/RejectCertificateHandler.h> # include <Poco/Net/RejectCertificateHandler.h>
# include <Poco/Net/SSLManager.h> # include <Poco/Net/SSLManager.h>
# include <Poco/Net/SecureStreamSocket.h>
#endif #endif
#include <Poco/Net/HTTPServerResponse.h> #include <Poco/Net/HTTPServerResponse.h>
@ -68,27 +69,27 @@ namespace
throw Exception("Unsupported scheme in URI '" + uri.toString() + "'", ErrorCodes::UNSUPPORTED_URI_SCHEME); throw Exception("Unsupported scheme in URI '" + uri.toString() + "'", ErrorCodes::UNSUPPORTED_URI_SCHEME);
} }
HTTPSessionPtr makeHTTPSessionImpl(const std::string & host, UInt16 port, bool https, bool keep_alive, bool resolve_host=true) HTTPSessionPtr makeHTTPSessionImpl(const std::string & host, UInt16 port, bool https, bool keep_alive, bool resolve_host = true)
{ {
HTTPSessionPtr session; HTTPSessionPtr session;
if (https) if (https)
{
#if USE_SSL #if USE_SSL
session = std::make_shared<Poco::Net::HTTPSClientSession>(); /// Cannot resolve host in advance, otherwise SNI won't work in Poco.
session = std::make_shared<Poco::Net::HTTPSClientSession>(host, port);
#else #else
throw Exception("ClickHouse was built without HTTPS support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); throw Exception("ClickHouse was built without HTTPS support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
#endif #endif
}
else else
session = std::make_shared<Poco::Net::HTTPClientSession>(); {
String resolved_host = resolve_host ? DNSResolver::instance().resolveHost(host).toString() : host;
session = std::make_shared<Poco::Net::HTTPClientSession>(resolved_host, port);
}
ProfileEvents::increment(ProfileEvents::CreatedHTTPConnections); ProfileEvents::increment(ProfileEvents::CreatedHTTPConnections);
if (resolve_host)
session->setHost(DNSResolver::instance().resolveHost(host).toString());
else
session->setHost(host);
session->setPort(port);
/// doesn't work properly without patch /// doesn't work properly without patch
#if defined(POCO_CLICKHOUSE_PATCH) #if defined(POCO_CLICKHOUSE_PATCH)
session->setKeepAlive(keep_alive); session->setKeepAlive(keep_alive);
@ -239,6 +240,7 @@ void assertResponseIsOk(const Poco::Net::HTTPRequest & request, Poco::Net::HTTPR
if (!(status == Poco::Net::HTTPResponse::HTTP_OK || (isRedirect(status) && allow_redirects))) if (!(status == Poco::Net::HTTPResponse::HTTP_OK || (isRedirect(status) && allow_redirects)))
{ {
std::stringstream error_message; std::stringstream error_message;
error_message.exceptions(std::ios::failbit);
error_message << "Received error from remote server " << request.getURI() << ". HTTP status code: " << status << " " error_message << "Received error from remote server " << request.getURI() << ". HTTP status code: " << status << " "
<< response.getReason() << ", body: " << istr.rdbuf(); << response.getReason() << ", body: " << istr.rdbuf();

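The session construction change above is the substance of this hunk: an HTTPS session has to be constructed with the original host name, because Poco derives the SNI value and the certificate-verification name from it (as the new comment says, resolving the host in advance breaks SNI), while a plain HTTP session can still be pointed at a pre-resolved address to skip a second DNS lookup. A sketch of that split, Poco API only, error handling omitted, assuming a build with NetSSL:

#include <memory>
#include <string>
#include <Poco/Net/HTTPClientSession.h>
#include <Poco/Net/HTTPSClientSession.h>

std::shared_ptr<Poco::Net::HTTPClientSession> makeSession(
    const std::string & host, Poco::UInt16 port, bool https, const std::string & resolved_ip)
{
    if (https)
        return std::make_shared<Poco::Net::HTTPSClientSession>(host, port);   /// keep the host name: SNI and cert checks need it
    return std::make_shared<Poco::Net::HTTPClientSession>(resolved_ip, port); /// plain HTTP can use the already-resolved address
}
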
View File

@ -13,6 +13,7 @@
#include <IO/ConnectionTimeouts.h> #include <IO/ConnectionTimeouts.h>
namespace Poco namespace Poco
{ {
namespace Net namespace Net
@ -24,6 +25,7 @@ namespace Net
namespace DB namespace DB
{ {
constexpr int HTTP_TOO_MANY_REQUESTS = 429; constexpr int HTTP_TOO_MANY_REQUESTS = 429;
class SingleEndpointHTTPSessionPool : public PoolBase<Poco::Net::HTTPClientSession> class SingleEndpointHTTPSessionPool : public PoolBase<Poco::Net::HTTPClientSession>
@ -39,6 +41,7 @@ private:
public: public:
SingleEndpointHTTPSessionPool(const std::string & host_, UInt16 port_, bool https_, size_t max_pool_size_); SingleEndpointHTTPSessionPool(const std::string & host_, UInt16 port_, bool https_, size_t max_pool_size_);
}; };
using PooledHTTPSessionPtr = SingleEndpointHTTPSessionPool::Entry; using PooledHTTPSessionPtr = SingleEndpointHTTPSessionPool::Entry;
using HTTPSessionPtr = std::shared_ptr<Poco::Net::HTTPClientSession>; using HTTPSessionPtr = std::shared_ptr<Poco::Net::HTTPClientSession>;
@ -59,5 +62,7 @@ bool isRedirect(const Poco::Net::HTTPResponse::HTTPStatus status);
*/ */
std::istream * receiveResponse( std::istream * receiveResponse(
Poco::Net::HTTPClientSession & session, const Poco::Net::HTTPRequest & request, Poco::Net::HTTPResponse & response, bool allow_redirects); Poco::Net::HTTPClientSession & session, const Poco::Net::HTTPRequest & request, Poco::Net::HTTPResponse & response, bool allow_redirects);
void assertResponseIsOk(const Poco::Net::HTTPRequest & request, Poco::Net::HTTPResponse & response, std::istream & istr, const bool allow_redirects = false);
void assertResponseIsOk(
const Poco::Net::HTTPRequest & request, Poco::Net::HTTPResponse & response, std::istream & istr, const bool allow_redirects = false);
} }

View File

@ -27,20 +27,14 @@ bool MySQLPacketPayloadReadBuffer::nextImpl()
in.readStrict(reinterpret_cast<char *>(&payload_length), 3); in.readStrict(reinterpret_cast<char *>(&payload_length), 3);
if (payload_length > MAX_PACKET_LENGTH) if (payload_length > MAX_PACKET_LENGTH)
{ throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT,
std::ostringstream tmp; "Received packet with payload larger than max_packet_size: {}", payload_length);
tmp << "Received packet with payload larger than max_packet_size: " << payload_length;
throw Exception(tmp.str(), ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT);
}
size_t packet_sequence_id = 0; size_t packet_sequence_id = 0;
in.read(reinterpret_cast<char &>(packet_sequence_id)); in.read(reinterpret_cast<char &>(packet_sequence_id));
if (packet_sequence_id != sequence_id) if (packet_sequence_id != sequence_id)
{ throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT,
std::ostringstream tmp; "Received packet with wrong sequence-id: {}. Expected: {}.", packet_sequence_id, static_cast<unsigned int>(sequence_id));
tmp << "Received packet with wrong sequence-id: " << packet_sequence_id << ". Expected: " << static_cast<unsigned int>(sequence_id) << '.';
throw Exception(tmp.str(), ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT);
}
sequence_id++; sequence_id++;
if (payload_length == 0) if (payload_length == 0)

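This is the pattern that replaces the std::ostringstream blocks in several files of this diff: DB::Exception takes the error code first and then a fmt-style format string, so the message is assembled only when the exception is actually thrown. Sketch, assuming the usual extern error-code declaration:

#include <Common/Exception.h>

namespace DB::ErrorCodes { extern const int UNKNOWN_PACKET_FROM_CLIENT; }

static void checkPayloadLength(size_t payload_length, size_t max_packet_length)
{
    if (payload_length > max_packet_length)
        throw DB::Exception(DB::ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT,
            "Received packet with payload larger than max_packet_size: {}", payload_length);
}
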
View File

@ -67,7 +67,7 @@ ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_,
bool ReadBufferFromPocoSocket::poll(size_t timeout_microseconds) bool ReadBufferFromPocoSocket::poll(size_t timeout_microseconds)
{ {
return offset() != buffer().size() || socket.poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); return available() || socket.poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR);
} }
} }

View File

@ -1 +0,0 @@
#include <IO/ReadWriteBufferFromHTTP.h>

View File

@ -72,10 +72,7 @@ public:
} }
else else
{ {
std::stringstream error_message; throw Exception(ErrorCodes::TOO_MANY_REDIRECTS, "Too many redirects while trying to access {}", initial_uri.toString());
error_message << "Too many redirects while trying to access " << initial_uri.toString();
throw Exception(error_message.str(), ErrorCodes::TOO_MANY_REDIRECTS);
} }
} }

View File

@ -248,6 +248,7 @@ void PocoHTTPClient::makeRequestInternal(
response->SetContentType(poco_response.getContentType()); response->SetContentType(poco_response.getContentType());
std::stringstream headers_ss; std::stringstream headers_ss;
headers_ss.exceptions(std::ios::failbit);
for (const auto & [header_name, header_value] : poco_response) for (const auto & [header_name, header_value] : poco_response)
{ {
response->AddHeader(header_name, header_value); response->AddHeader(header_name, header_value);

View File

@ -77,6 +77,7 @@ std::string dumpContents(const T& container,
{ {
std::stringstream sstr; std::stringstream sstr;
sstr.exceptions(std::ios::failbit);
dumpBuffer(std::begin(container), std::end(container), &sstr, col_sep, row_sep, cols_in_row); dumpBuffer(std::begin(container), std::end(container), &sstr, col_sep, row_sep, cols_in_row);
return sstr.str(); return sstr.str();

View File

@ -23,6 +23,7 @@ static void test(size_t data_size)
{ {
std::cout << "block size " << read_buffer_block_size << std::endl; std::cout << "block size " << read_buffer_block_size << std::endl;
std::stringstream io; std::stringstream io;
io.exceptions(std::ios::failbit);
DB::WriteBufferFromOStream out_impl(io); DB::WriteBufferFromOStream out_impl(io);
DB::HashingWriteBuffer out(out_impl); DB::HashingWriteBuffer out(out_impl);
out.write(data, data_size); out.write(data, data_size);

View File

@ -21,6 +21,7 @@ try
using namespace DB; using namespace DB;
std::stringstream s; std::stringstream s;
s.exceptions(std::ios::failbit);
{ {
std::string src = "1"; std::string src = "1";

View File

@ -17,6 +17,7 @@ int main(int, char **)
DB::String d = "'xyz\\"; DB::String d = "'xyz\\";
std::stringstream s; std::stringstream s;
s.exceptions(std::ios::failbit);
{ {
DB::WriteBufferFromOStream out(s); DB::WriteBufferFromOStream out(s);

View File

@ -38,7 +38,6 @@ SRCS(
ReadBufferFromMemory.cpp ReadBufferFromMemory.cpp
ReadBufferFromPocoSocket.cpp ReadBufferFromPocoSocket.cpp
ReadHelpers.cpp ReadHelpers.cpp
ReadWriteBufferFromHTTP.cpp
SeekAvoidingReadBuffer.cpp SeekAvoidingReadBuffer.cpp
UseSSL.cpp UseSSL.cpp
WriteBufferAIO.cpp WriteBufferAIO.cpp

View File

@ -107,6 +107,7 @@ String formattedAST(const ASTPtr & ast)
if (!ast) if (!ast)
return {}; return {};
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
formatAST(*ast, ss, false, true); formatAST(*ast, ss, false, true);
return ss.str(); return ss.str();
} }

View File

@ -4,9 +4,10 @@
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
#include <Interpreters/Cluster.h> #include <Interpreters/Cluster.h>
#include <Interpreters/IInterpreter.h> #include <Interpreters/IInterpreter.h>
#include <Parsers/queryToString.h>
#include <Interpreters/ProcessList.h> #include <Interpreters/ProcessList.h>
#include <Parsers/queryToString.h>
#include <Processors/Pipe.h> #include <Processors/Pipe.h>
#include <Storages/SelectQueryInfo.h>
namespace DB namespace DB
@ -81,16 +82,17 @@ Context updateSettingsForCluster(const Cluster & cluster, const Context & contex
} }
Pipe executeQuery( Pipe executeQuery(
IStreamFactory & stream_factory, const ClusterPtr & cluster, Poco::Logger * log, IStreamFactory & stream_factory, Poco::Logger * log,
const ASTPtr & query_ast, const Context & context, const Settings & settings, const SelectQueryInfo & query_info) const ASTPtr & query_ast, const Context & context, const SelectQueryInfo & query_info)
{ {
assert(log); assert(log);
Pipes res; Pipes res;
const Settings & settings = context.getSettingsRef();
const std::string query = queryToString(query_ast); const std::string query = queryToString(query_ast);
Context new_context = updateSettingsForCluster(*cluster, context, settings, log); Context new_context = updateSettingsForCluster(*query_info.cluster, context, settings, log);
ThrottlerPtr user_level_throttler; ThrottlerPtr user_level_throttler;
if (auto * process_list_element = context.getProcessListElement()) if (auto * process_list_element = context.getProcessListElement())
@ -109,7 +111,7 @@ Pipe executeQuery(
else else
throttler = user_level_throttler; throttler = user_level_throttler;
for (const auto & shard_info : cluster->getShardsInfo()) for (const auto & shard_info : query_info.cluster->getShardsInfo())
stream_factory.createForShard(shard_info, query, query_ast, new_context, throttler, query_info, res); stream_factory.createForShard(shard_info, query, query_ast, new_context, throttler, query_info, res);
return Pipe::unitePipes(std::move(res)); return Pipe::unitePipes(std::move(res));

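Note on the ClusterProxy::executeQuery signature change above: the cluster now travels inside SelectQueryInfo and the settings are read from the Context, so callers fill query_info once and pass it through. A sketch of the new calling convention (names taken from the hunk; header paths and namespaces are assumptions):

#include <Interpreters/Cluster.h>
#include <Interpreters/ClusterProxy/IStreamFactory.h>
#include <Interpreters/ClusterProxy/executeQuery.h>
#include <Storages/SelectQueryInfo.h>

DB::Pipe runOnCluster(DB::ClusterProxy::IStreamFactory & stream_factory, Poco::Logger * log,
                      const DB::ASTPtr & query_ast, const DB::Context & context, const DB::ClusterPtr & cluster)
{
    DB::SelectQueryInfo query_info;
    query_info.query = query_ast;
    query_info.cluster = cluster;   /// previously a separate executeQuery() argument
    return DB::ClusterProxy::executeQuery(stream_factory, log, query_ast, context, query_info);
}
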
View File

@ -1,7 +1,6 @@
#pragma once #pragma once
#include <Parsers/IAST.h> #include <Parsers/IAST.h>
#include <Interpreters/Cluster.h>
namespace DB namespace DB
{ {
@ -33,8 +32,7 @@ Context updateSettingsForCluster(const Cluster & cluster, const Context & contex
/// `stream_factory` object encapsulates the logic of creating streams for a different type of query /// `stream_factory` object encapsulates the logic of creating streams for a different type of query
/// (currently SELECT, DESCRIBE). /// (currently SELECT, DESCRIBE).
Pipe executeQuery( Pipe executeQuery(
IStreamFactory & stream_factory, const ClusterPtr & cluster, Poco::Logger * log, IStreamFactory & stream_factory, Poco::Logger * log, const ASTPtr & query_ast, const Context & context, const SelectQueryInfo & query_info);
const ASTPtr & query_ast, const Context & context, const Settings & settings, const SelectQueryInfo & query_info);
} }

View File

@ -65,6 +65,7 @@
#include <Interpreters/DatabaseCatalog.h> #include <Interpreters/DatabaseCatalog.h>
#include <Storages/MergeTree/BackgroundJobsExecutor.h> #include <Storages/MergeTree/BackgroundJobsExecutor.h>
namespace ProfileEvents namespace ProfileEvents
{ {
extern const Event ContextLock; extern const Event ContextLock;
@ -153,7 +154,7 @@ public:
} }
else if (it->second->key.first != context.client_info.current_user) else if (it->second->key.first != context.client_info.current_user)
{ {
throw Exception("Session belongs to a different user", ErrorCodes::LOGICAL_ERROR); throw Exception("Session belongs to a different user", ErrorCodes::SESSION_IS_LOCKED);
} }
/// Use existing session. /// Use existing session.
@ -596,7 +597,8 @@ VolumePtr Context::setTemporaryStorage(const String & path, const String & polic
{ {
StoragePolicyPtr tmp_policy = getStoragePolicySelector(lock)->get(policy_name); StoragePolicyPtr tmp_policy = getStoragePolicySelector(lock)->get(policy_name);
if (tmp_policy->getVolumes().size() != 1) if (tmp_policy->getVolumes().size() != 1)
throw Exception("Policy " + policy_name + " is used temporary files, such policy should have exactly one volume", ErrorCodes::NO_ELEMENTS_IN_CONFIG); throw Exception("Policy " + policy_name + " is used temporary files, such policy should have exactly one volume",
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
shared->tmp_volume = tmp_policy->getVolume(0); shared->tmp_volume = tmp_policy->getVolume(0);
} }
@ -1083,11 +1085,13 @@ String Context::getInitialQueryId() const
void Context::setCurrentDatabaseNameInGlobalContext(const String & name) void Context::setCurrentDatabaseNameInGlobalContext(const String & name)
{ {
if (global_context != this) if (global_context != this)
throw Exception("Cannot set current database for non global context, this method should be used during server initialization", ErrorCodes::LOGICAL_ERROR); throw Exception("Cannot set current database for non global context, this method should be used during server initialization",
ErrorCodes::LOGICAL_ERROR);
auto lock = getLock(); auto lock = getLock();
if (!current_database.empty()) if (!current_database.empty())
throw Exception("Default database name cannot be changed in global context without server restart", ErrorCodes::LOGICAL_ERROR); throw Exception("Default database name cannot be changed in global context without server restart",
ErrorCodes::LOGICAL_ERROR);
current_database = name; current_database = name;
} }
@ -1470,7 +1474,7 @@ DDLWorker & Context::getDDLWorker() const
{ {
auto lock = getLock(); auto lock = getLock();
if (!shared->ddl_worker) if (!shared->ddl_worker)
throw Exception("DDL background thread is not initialized.", ErrorCodes::LOGICAL_ERROR); throw Exception("DDL background thread is not initialized.", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
return *shared->ddl_worker; return *shared->ddl_worker;
} }
@ -1962,6 +1966,7 @@ void Context::checkCanBeDropped(const String & database, const String & table, c
String size_str = formatReadableSizeWithDecimalSuffix(size); String size_str = formatReadableSizeWithDecimalSuffix(size);
String max_size_to_drop_str = formatReadableSizeWithDecimalSuffix(max_size_to_drop); String max_size_to_drop_str = formatReadableSizeWithDecimalSuffix(max_size_to_drop);
std::stringstream ostr; std::stringstream ostr;
ostr.exceptions(std::ios::failbit);
ostr << "Table or Partition in " << backQuoteIfNeed(database) << "." << backQuoteIfNeed(table) << " was not dropped.\n" ostr << "Table or Partition in " << backQuoteIfNeed(database) << "." << backQuoteIfNeed(table) << " was not dropped.\n"
<< "Reason:\n" << "Reason:\n"

View File

@ -454,6 +454,7 @@ void ExpressionAction::execute(Block & block, bool dry_run) const
std::string ExpressionAction::toString() const std::string ExpressionAction::toString() const
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
switch (type) switch (type)
{ {
case ADD_COLUMN: case ADD_COLUMN:
@ -550,6 +551,7 @@ void ExpressionActions::checkLimits(Block & block) const
if (non_const_columns > settings.max_temporary_non_const_columns) if (non_const_columns > settings.max_temporary_non_const_columns)
{ {
std::stringstream list_of_non_const_columns; std::stringstream list_of_non_const_columns;
list_of_non_const_columns.exceptions(std::ios::failbit);
for (size_t i = 0, size = block.columns(); i < size; ++i) for (size_t i = 0, size = block.columns(); i < size; ++i)
if (block.safeGetByPosition(i).column && !isColumnConst(*block.safeGetByPosition(i).column)) if (block.safeGetByPosition(i).column && !isColumnConst(*block.safeGetByPosition(i).column))
list_of_non_const_columns << "\n" << block.safeGetByPosition(i).name; list_of_non_const_columns << "\n" << block.safeGetByPosition(i).name;
@ -921,6 +923,7 @@ void ExpressionActions::finalize(const Names & output_columns)
std::string ExpressionActions::dumpActions() const std::string ExpressionActions::dumpActions() const
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
ss << "input:\n"; ss << "input:\n";
for (const auto & input_column : input_columns) for (const auto & input_column : input_columns)
@ -1342,6 +1345,7 @@ void ExpressionActionsChain::finalize()
std::string ExpressionActionsChain::dumpChain() const std::string ExpressionActionsChain::dumpChain() const
{ {
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
for (size_t i = 0; i < steps.size(); ++i) for (size_t i = 0; i < steps.size(); ++i)
{ {

View File

@ -136,6 +136,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
{ {
/// Currently, there are no database engines, that support any arguments. /// Currently, there are no database engines, that support any arguments.
std::stringstream ostr; std::stringstream ostr;
ostr.exceptions(std::ios::failbit);
formatAST(*create.storage, ostr, false, false); formatAST(*create.storage, ostr, false, false);
throw Exception("Unknown database engine: " + ostr.str(), ErrorCodes::UNKNOWN_DATABASE_ENGINE); throw Exception("Unknown database engine: " + ostr.str(), ErrorCodes::UNKNOWN_DATABASE_ENGINE);
} }
@ -182,6 +183,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
create.if_not_exists = false; create.if_not_exists = false;
std::ostringstream statement_stream; std::ostringstream statement_stream;
statement_stream.exceptions(std::ios::failbit);
formatAST(create, statement_stream, false); formatAST(create, statement_stream, false);
statement_stream << '\n'; statement_stream << '\n';
String statement = statement_stream.str(); String statement = statement_stream.str();

View File

@ -223,6 +223,7 @@ BlockInputStreamPtr InterpreterExplainQuery::executeImpl()
MutableColumns res_columns = sample_block.cloneEmptyColumns(); MutableColumns res_columns = sample_block.cloneEmptyColumns();
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
if (ast.getKind() == ASTExplainQuery::ParsedAST) if (ast.getKind() == ASTExplainQuery::ParsedAST)
{ {

View File

@ -495,8 +495,10 @@ BlockIO InterpreterSelectQuery::execute()
Block InterpreterSelectQuery::getSampleBlockImpl() Block InterpreterSelectQuery::getSampleBlockImpl()
{ {
query_info.query = query_ptr;
if (storage && !options.only_analyze) if (storage && !options.only_analyze)
from_stage = storage->getQueryProcessingStage(*context, options.to_stage, query_ptr); from_stage = storage->getQueryProcessingStage(*context, options.to_stage, query_info);
/// Do I need to perform the first part of the pipeline - running on remote servers during distributed processing. /// Do I need to perform the first part of the pipeline - running on remote servers during distributed processing.
bool first_stage = from_stage < QueryProcessingStage::WithMergeableState bool first_stage = from_stage < QueryProcessingStage::WithMergeableState
@ -1433,7 +1435,6 @@ void InterpreterSelectQuery::executeFetchColumns(
if (max_streams > 1 && !is_remote) if (max_streams > 1 && !is_remote)
max_streams *= settings.max_streams_to_max_threads_ratio; max_streams *= settings.max_streams_to_max_threads_ratio;
query_info.query = query_ptr;
query_info.syntax_analyzer_result = syntax_analyzer_result; query_info.syntax_analyzer_result = syntax_analyzer_result;
query_info.sets = query_analyzer->getPreparedSets(); query_info.sets = query_analyzer->getPreparedSets();
query_info.prewhere_info = prewhere_info; query_info.prewhere_info = prewhere_info;

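The relocation of query_info.query above is not cosmetic: getSampleBlockImpl() now asks the storage for its processing stage with query_info as an argument, so the query pointer has to be in place before that call rather than being filled later in executeFetchColumns(). Restating the ordering from the hunk with the implied reason spelled out (an inference, not stated in the diff itself):

query_info.query = query_ptr;   /// must be set first: the storage may inspect it through query_info
if (storage && !options.only_analyze)
    from_stage = storage->getQueryProcessingStage(*context, options.to_stage, query_info);
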
View File

@ -35,6 +35,7 @@ BlockInputStreamPtr InterpreterShowAccessQuery::executeImpl() const
/// Build the result column. /// Build the result column.
MutableColumnPtr column = ColumnString::create(); MutableColumnPtr column = ColumnString::create();
std::stringstream ss; std::stringstream ss;
ss.exceptions(std::ios::failbit);
for (const auto & query : queries) for (const auto & query : queries)
{ {
ss.str(""); ss.str("");

View File

@ -239,6 +239,7 @@ BlockInputStreamPtr InterpreterShowCreateAccessEntityQuery::executeImpl()
/// Build the result column. /// Build the result column.
MutableColumnPtr column = ColumnString::create(); MutableColumnPtr column = ColumnString::create();
std::stringstream create_query_ss; std::stringstream create_query_ss;
create_query_ss.exceptions(std::ios::failbit);
for (const auto & create_query : create_queries) for (const auto & create_query : create_queries)
{ {
formatAST(*create_query, create_query_ss, false, true); formatAST(*create_query, create_query_ss, false, true);
@ -248,6 +249,7 @@ BlockInputStreamPtr InterpreterShowCreateAccessEntityQuery::executeImpl()
/// Prepare description of the result column. /// Prepare description of the result column.
std::stringstream desc_ss; std::stringstream desc_ss;
desc_ss.exceptions(std::ios::failbit);
const auto & show_query = query_ptr->as<const ASTShowCreateAccessEntityQuery &>(); const auto & show_query = query_ptr->as<const ASTShowCreateAccessEntityQuery &>();
formatAST(show_query, desc_ss, false, true); formatAST(show_query, desc_ss, false, true);
String desc = desc_ss.str(); String desc = desc_ss.str();

View File

@ -79,6 +79,7 @@ BlockInputStreamPtr InterpreterShowCreateQuery::executeImpl()
} }
std::stringstream stream; std::stringstream stream;
stream.exceptions(std::ios::failbit);
formatAST(*create_query, stream, false, false); formatAST(*create_query, stream, false, false);
String res = stream.str(); String res = stream.str();

Some files were not shown because too many files have changed in this diff.