diff --git a/CMakeLists.txt b/CMakeLists.txt index 986096ba9e8..59a4dfe6ab5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -198,11 +198,11 @@ if(WITH_COVERAGE AND COMPILER_GCC) endif() set (CMAKE_BUILD_COLOR_MAKEFILE ON) -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}") +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}") set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_CXX_FLAGS_ADD}") -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}") +set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}") @@ -382,16 +382,12 @@ add_subdirectory (contrib EXCLUDE_FROM_ALL) macro (add_executable target) # invoke built-in add_executable - _add_executable (${ARGV}) + # explicitly acquire and interpose malloc symbols by clickhouse_malloc + _add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>) get_target_property (type ${target} TYPE) if (${type} STREQUAL EXECUTABLE) - file (RELATIVE_PATH dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) - if (${dir} MATCHES "^dbms") - # Only interpose operator::new/delete for dbms executables (MemoryTracker stuff) - target_link_libraries (${target} PRIVATE clickhouse_new_delete ${MALLOC_LIBRARIES}) - else () - target_link_libraries (${target} PRIVATE ${MALLOC_LIBRARIES}) - endif () + # operator::new/delete for executables (MemoryTracker stuff) + target_link_libraries (${target} PRIVATE clickhouse_new_delete ${MALLOC_LIBRARIES}) endif() endmacro() diff --git a/README.md b/README.md index ae4abb10941..5d9faa11fbe 100644 --- a/README.md +++ b/README.md @@ -14,5 +14,4 @@ ClickHouse is an open-source column-oriented database management system that all ## Upcoming Events -* [ClickHouse Meetup in San Francisco](https://www.eventbrite.com/e/clickhouse-december-meetup-registration-78642047481) on December 3. * [ClickHouse Meetup in Moscow](https://yandex.ru/promo/clickhouse/moscow-december-2019) on December 11. diff --git a/cmake/find/termcap.cmake b/cmake/find/termcap.cmake index 47b772331bb..7564b7134e7 100644 --- a/cmake/find/termcap.cmake +++ b/cmake/find/termcap.cmake @@ -1,5 +1,8 @@ -find_library (TERMCAP_LIBRARY termcap) +find_library (TERMCAP_LIBRARY tinfo) if (NOT TERMCAP_LIBRARY) - find_library (TERMCAP_LIBRARY tinfo) + find_library (TERMCAP_LIBRARY ncurses) +endif() +if (NOT TERMCAP_LIBRARY) + find_library (TERMCAP_LIBRARY termcap) endif() message (STATUS "Using termcap: ${TERMCAP_LIBRARY}") diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 5a183f9eeba..a97e7466286 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -20,16 +20,41 @@ else () message (WARNING "You are using an unsupported compiler. 
Compilation has only been tested with Clang 6+ and GCC 7+.") endif () -option (LINKER_NAME "Linker name or full path") +STRING(REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION}) +LIST(GET COMPILER_VERSION_LIST 0 COMPILER_VERSION_MAJOR) -find_program (LLD_PATH NAMES "ld.lld" "lld") +option (LINKER_NAME "Linker name or full path") +find_program (LLD_PATH NAMES "ld.lld" "lld" "lld-${COMPILER_VERSION_MAJOR}") find_program (GOLD_PATH NAMES "ld.gold" "gold") +# We prefer LLD linker over Gold or BFD. + if (NOT LINKER_NAME) if (LLD_PATH) - set (LINKER_NAME "lld") - elseif (GOLD_PATH) - set (LINKER_NAME "gold") + # GCC driver requires one of supported linker names like "lld". + # Clang driver simply allows full linker path. + + if (COMPILER_GCC) + get_filename_component(LLD_BASENAME ${LLD_PATH} NAME) + if (LLD_BASENAME STREQUAL ld.lld) + set (LINKER_NAME "lld") + endif () + else () + set (LINKER_NAME ${LLD_PATH}) + endif () + endif () +endif () + +if (NOT LINKER_NAME) + if (GOLD_PATH) + if (COMPILER_GCC) + get_filename_component(GOLD_BASENAME ${GOLD_PATH} NAME) + if (GOLD_BASENAME STREQUAL ld.gold) + set (LINKER_NAME "gold") + endif () + else () + set (LINKER_NAME ${GOLD_PATH}) + endif () endif () endif () diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index b0a271b21ac..a13299b0ffa 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -52,6 +52,7 @@ if (USE_INTERNAL_BTRIE_LIBRARY) endif () if (USE_INTERNAL_ZLIB_LIBRARY) + unset (BUILD_SHARED_LIBS CACHE) set (ZLIB_ENABLE_TESTS 0 CACHE INTERNAL "") set (SKIP_INSTALL_ALL 1 CACHE INTERNAL "") set (ZLIB_COMPAT 1 CACHE INTERNAL "") # also enables WITH_GZFILEOP diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt index f09d0979692..7a6648d8dc6 100644 --- a/contrib/libunwind-cmake/CMakeLists.txt +++ b/contrib/libunwind-cmake/CMakeLists.txt @@ -11,7 +11,9 @@ endif () set(LIBUNWIND_C_SOURCES ${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c ${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1-gcc-ext.c - ${LIBUNWIND_SOURCE_DIR}/src/Unwind-sjlj.c) + ${LIBUNWIND_SOURCE_DIR}/src/Unwind-sjlj.c + # Use unw_backtrace to override libgcc's backtrace symbol for better ABI compatibility + unwind-override.c) set_source_files_properties(${LIBUNWIND_C_SOURCES} PROPERTIES COMPILE_FLAGS "-std=c99") set(LIBUNWIND_ASM_SOURCES diff --git a/contrib/libunwind-cmake/unwind-override.c b/contrib/libunwind-cmake/unwind-override.c new file mode 100644 index 00000000000..616bab6ae4b --- /dev/null +++ b/contrib/libunwind-cmake/unwind-override.c @@ -0,0 +1,6 @@ +#include <libunwind.h> + +int backtrace(void ** buffer, int size) +{ + return unw_backtrace(buffer, size); +} diff --git a/contrib/poco b/contrib/poco index 2b273bfe9db..d478f62bd93 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 2b273bfe9db89429b2040c024484dee0197e48c7 +Subproject commit d478f62bd93c9cd14eb343756ef73a4ae622ddf5 diff --git a/contrib/zlib-ng b/contrib/zlib-ng index cff0f500d93..bba56a73be2 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit cff0f500d9399d7cd3b9461a693d211e4b86fcc9 +Subproject commit bba56a73be249514acfbc7d49aa2a68994dad8ab diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index 510faed187b..005c9bb072b 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -100,7 +100,7 @@ set(dbms_sources) add_headers_and_sources(clickhouse_common_io src/Common) add_headers_and_sources(clickhouse_common_io src/Common/HashTable) add_headers_and_sources(clickhouse_common_io src/IO) 
-list (REMOVE_ITEM clickhouse_common_io_sources src/Common/new_delete.cpp) +list (REMOVE_ITEM clickhouse_common_io_sources src/Common/malloc.cpp src/Common/new_delete.cpp) if(USE_RDKAFKA) add_headers_and_sources(dbms src/Storages/Kafka) @@ -140,6 +140,9 @@ endif () add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources}) +add_library (clickhouse_malloc OBJECT src/Common/malloc.cpp) +set_source_files_properties(src/Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin") + add_library (clickhouse_new_delete STATIC src/Common/new_delete.cpp) target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io) @@ -376,6 +379,10 @@ if (USE_POCO_MONGODB) dbms_target_link_libraries (PRIVATE ${Poco_MongoDB_LIBRARY}) endif() +if (USE_POCO_REDIS) + dbms_target_link_libraries (PRIVATE ${Poco_Redis_LIBRARY}) +endif() + if (USE_POCO_NETSSL) target_link_libraries (clickhouse_common_io PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) dbms_target_link_libraries (PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) @@ -428,6 +435,8 @@ if (USE_JEMALLOC) if(NOT MAKE_STATIC_LIBRARIES AND ${JEMALLOC_LIBRARIES} MATCHES "${CMAKE_STATIC_LIBRARY_SUFFIX}$") # mallctl in dbms/src/Interpreters/AsynchronousMetrics.cpp + # Actually we link JEMALLOC to almost all libraries. + # This is just hotfix for some uninvestigated problem. target_link_libraries(clickhouse_interpreters PRIVATE ${JEMALLOC_LIBRARIES}) endif() endif () diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake index 3afa379fee2..220af3d87dc 100644 --- a/dbms/cmake/version.cmake +++ b/dbms/cmake/version.cmake @@ -1,11 +1,11 @@ # This strings autochanged from release_lib.sh: -set(VERSION_REVISION 54429) +set(VERSION_REVISION 54430) set(VERSION_MAJOR 19) -set(VERSION_MINOR 18) +set(VERSION_MINOR 19) set(VERSION_PATCH 1) -set(VERSION_GITHASH 4e68211879480b637683ae66dbcc89a2714682af) -set(VERSION_DESCRIBE v19.18.1.1-prestable) -set(VERSION_STRING 19.18.1.1) +set(VERSION_GITHASH 8bd9709d1dec3366e35d2efeab213435857f67a9) +set(VERSION_DESCRIBE v19.19.1.1-prestable) +set(VERSION_STRING 19.19.1.1) # end of autochange set(VERSION_EXTRA "" CACHE STRING "") diff --git a/dbms/programs/server/HTTPHandler.cpp b/dbms/programs/server/HTTPHandler.cpp index cefa3712997..29d186def2d 100644 --- a/dbms/programs/server/HTTPHandler.cpp +++ b/dbms/programs/server/HTTPHandler.cpp @@ -34,7 +34,6 @@ #include #include #include -#include #include #include diff --git a/dbms/programs/server/MySQLHandler.cpp b/dbms/programs/server/MySQLHandler.cpp index 56ec25686dd..a147ccafba0 100644 --- a/dbms/programs/server/MySQLHandler.cpp +++ b/dbms/programs/server/MySQLHandler.cpp @@ -15,6 +15,7 @@ #include #include #include +#include <boost/algorithm/string/replace.hpp> #if USE_POCO_NETSSL #include @@ -220,7 +221,8 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl { // For compatibility with JavaScript MySQL client, Native41 authentication plugin is used when possible (if password is specified using double SHA1). Otherwise SHA256 plugin is used. 
auto user = connection_context.getUser(user_name); - if (user->authentication.getType() != DB::Authentication::DOUBLE_SHA1_PASSWORD) + const DB::Authentication::Type user_auth_type = user->authentication.getType(); + if (user_auth_type != DB::Authentication::DOUBLE_SHA1_PASSWORD && user_auth_type != DB::Authentication::PLAINTEXT_PASSWORD && user_auth_type != DB::Authentication::NO_PASSWORD) { authPluginSSL(); } @@ -267,29 +269,49 @@ void MySQLHandler::comPing() packet_sender->sendPacket(OK_Packet(0x0, client_capability_flags, 0, 0, 0), true); } +static bool isFederatedServerSetupCommand(const String & query); + void MySQLHandler::comQuery(ReadBuffer & payload) { - bool with_output = false; - std::function<void(const String &)> set_content_type = [&with_output](const String &) -> void { - with_output = true; - }; + String query = String(payload.position(), payload.buffer().end()); - const String query("select ''"); - ReadBufferFromString empty_select(query); - - bool should_replace = false; - // Translate query from MySQL to ClickHouse. - // This is a temporary workaround until ClickHouse supports the syntax "@@var_name". - if (std::string(payload.position(), payload.buffer().end()) == "select @@version_comment limit 1") // MariaDB client starts session with that query + // This is a workaround in order to support adding ClickHouse to MySQL using federated server. + // As Clickhouse doesn't support these statements, we just send OK packet in response. + if (isFederatedServerSetupCommand(query)) { - should_replace = true; - } - - Context query_context = connection_context; - executeQuery(should_replace ? empty_select : payload, *out, true, query_context, set_content_type, nullptr); - - if (!with_output) packet_sender->sendPacket(OK_Packet(0x00, client_capability_flags, 0, 0, 0), true); + } + else + { + bool with_output = false; + std::function<void(const String &)> set_content_type = [&with_output](const String &) -> void { + with_output = true; + }; + + String replacement_query = "select ''"; + bool should_replace = false; + + // Translate query from MySQL to ClickHouse. + // This is a temporary workaround until ClickHouse supports the syntax "@@var_name". + if (query == "select @@version_comment limit 1") // MariaDB client starts session with that query + { + should_replace = true; + } + // This is a workaround in order to support adding ClickHouse to MySQL using federated server. + if (0 == strncasecmp("SHOW TABLE STATUS LIKE", query.c_str(), 22)) + { + should_replace = true; + replacement_query = boost::replace_all_copy(query, "SHOW TABLE STATUS LIKE ", show_table_status_replacement_query); + } + + ReadBufferFromString replacement(replacement_query); + + Context query_context = connection_context; + executeQuery(should_replace ? 
replacement : payload, *out, true, query_context, set_content_type, nullptr); + + if (!with_output) + packet_sender->sendPacket(OK_Packet(0x00, client_capability_flags, 0, 0, 0), true); + } } void MySQLHandler::authPluginSSL() @@ -335,4 +357,33 @@ void MySQLHandlerSSL::finishHandshakeSSL(size_t packet_size, char * buf, size_t #endif +static bool isFederatedServerSetupCommand(const String & query) +{ + return 0 == strncasecmp("SET NAMES", query.c_str(), 9) || 0 == strncasecmp("SET character_set_results", query.c_str(), 25) + || 0 == strncasecmp("SET FOREIGN_KEY_CHECKS", query.c_str(), 22) || 0 == strncasecmp("SET AUTOCOMMIT", query.c_str(), 14) + || 0 == strncasecmp("SET SESSION TRANSACTION ISOLATION LEVEL", query.c_str(), 39); +} + +const String MySQLHandler::show_table_status_replacement_query("SELECT" + " name AS Name," + " engine AS Engine," + " '10' AS Version," + " 'Dynamic' AS Row_format," + " 0 AS Rows," + " 0 AS Avg_row_length," + " 0 AS Data_length," + " 0 AS Max_data_length," + " 0 AS Index_length," + " 0 AS Data_free," + " 'NULL' AS Auto_increment," + " metadata_modification_time AS Create_time," + " metadata_modification_time AS Update_time," + " metadata_modification_time AS Check_time," + " 'utf8_bin' AS Collation," + " 'NULL' AS Checksum," + " '' AS Create_options," + " '' AS Comment" + " FROM system.tables" + " WHERE name LIKE "); + } diff --git a/dbms/programs/server/MySQLHandler.h b/dbms/programs/server/MySQLHandler.h index 96cb353d897..42629470632 100644 --- a/dbms/programs/server/MySQLHandler.h +++ b/dbms/programs/server/MySQLHandler.h @@ -11,7 +11,6 @@ namespace DB { - /// Handler for MySQL wire protocol connections. Allows to connect to ClickHouse using MySQL client. class MySQLHandler : public Poco::Net::TCPServerConnection { @@ -59,6 +58,9 @@ protected: std::shared_ptr<WriteBuffer> out; bool secure_connection = false; + +private: + static const String show_table_status_replacement_query; }; #if USE_SSL && USE_POCO_NETSSL diff --git a/dbms/programs/server/Server.cpp b/dbms/programs/server/Server.cpp index 7952ccc9966..6fbdb48d631 100644 --- a/dbms/programs/server/Server.cpp +++ b/dbms/programs/server/Server.cpp @@ -243,6 +243,8 @@ int Server::main(const std::vector<std::string> & /*args*/) } #endif + global_context->setRemoteHostFilter(config()); + std::string path = getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)); std::string default_database = config().getString("default_database", "default"); diff --git a/dbms/programs/server/TCPHandler.cpp b/dbms/programs/server/TCPHandler.cpp index 76ea69cc737..3378878e718 100644 --- a/dbms/programs/server/TCPHandler.cpp +++ b/dbms/programs/server/TCPHandler.cpp @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -201,6 +200,8 @@ void TCPHandler::runImpl() /// So, the stream has been marked as cancelled and we can't read from it anymore. state.block_in.reset(); state.maybe_compressed_in.reset(); /// For more accurate accounting by MemoryTracker. + + state.temporary_tables_read = true; }); /// Send structure of columns to client for function input() @@ -340,6 +341,18 @@ void TCPHandler::runImpl() LOG_WARNING(log, "Client has gone away."); } + try + { + if (exception && !state.temporary_tables_read) + query_context->initializeExternalTablesIfSet(); + } + catch (...) 
+ { + network_error = true; + LOG_WARNING(log, "Can't read external tables after query failure."); + } + + try { query_scope.reset(); diff --git a/dbms/programs/server/TCPHandler.h b/dbms/programs/server/TCPHandler.h index 561ed4d0eca..4ab9097b9bb 100644 --- a/dbms/programs/server/TCPHandler.h +++ b/dbms/programs/server/TCPHandler.h @@ -63,6 +63,8 @@ struct QueryState bool sent_all_data = false; /// Request requires data from the client (INSERT, but not INSERT SELECT). bool need_receive_data_for_insert = false; + /// Temporary tables read + bool temporary_tables_read = false; /// Request requires data from client for function input() bool need_receive_data_for_input = false; diff --git a/dbms/programs/server/config.xml b/dbms/programs/server/config.xml index 248d37bc39d..efdda3abd06 100644 --- a/dbms/programs/server/config.xml +++ b/dbms/programs/server/config.xml @@ -3,6 +3,25 @@ NOTE: User and query level settings are set up in "users.xml" file. --> + + + + + + + trace @@ -15,7 +34,6 @@ 8123 9000 - + 65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5 + + ::/0 + + default + default + + e395796d6546b1b65db9d665cd43f0e858dd4303 diff --git a/dbms/tests/integration/test_mysql_protocol/test.py b/dbms/tests/integration/test_mysql_protocol/test.py index f8d79cb2e32..3f4f4e2a2f8 100644 --- a/dbms/tests/integration/test_mysql_protocol/test.py +++ b/dbms/tests/integration/test_mysql_protocol/test.py @@ -1,9 +1,12 @@ # coding: utf-8 -import os import docker +import datetime +import math +import os import pytest import subprocess +import time import pymysql.connections from docker.models.containers import Container @@ -36,6 +39,25 @@ def mysql_client(): yield docker.from_env().containers.get(cluster.project_name + '_mysql1_1') +@pytest.fixture(scope='module') +def mysql_server(mysql_client): + """Return MySQL container when it is healthy. + + :type mysql_client: Container + :rtype: Container + """ + retries = 30 + for i in range(retries): + info = mysql_client.client.api.inspect_container(mysql_client.name) + if info['State']['Health']['Status'] == 'healthy': + break + time.sleep(1) + else: + raise Exception('Mysql server has not started in %d seconds.' 
% retries) + + return mysql_client + + @pytest.fixture(scope='module') def golang_container(): docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'golang', 'docker_compose.yml') @@ -109,7 +131,51 @@ def test_mysql_client(mysql_client, server_address): assert stdout == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', '']) +def test_mysql_federated(mysql_server, server_address): + node.query('''DROP DATABASE IF EXISTS mysql_federated''', settings={"password": "123"}) + node.query('''CREATE DATABASE mysql_federated''', settings={"password": "123"}) + node.query('''CREATE TABLE mysql_federated.test (col UInt32) ENGINE = Log''', settings={"password": "123"}) + node.query('''INSERT INTO mysql_federated.test VALUES (0), (1), (5)''', settings={"password": "123"}) + + code, (_, stderr) = mysql_server.exec_run(''' + mysql + -e "DROP SERVER IF EXISTS clickhouse;" + -e "CREATE SERVER clickhouse FOREIGN DATA WRAPPER mysql OPTIONS (USER 'default', PASSWORD '123', HOST '{host}', PORT {port}, DATABASE 'mysql_federated');" + -e "DROP DATABASE IF EXISTS mysql_federated;" + -e "CREATE DATABASE mysql_federated;" + '''.format(host=server_address, port=server_port), demux=True) + + assert code == 0 + + code, (stdout, stderr) = mysql_server.exec_run(''' + mysql + -e "CREATE TABLE mysql_federated.test(`col` int UNSIGNED) ENGINE=FEDERATED CONNECTION='clickhouse';" + -e "SELECT * FROM mysql_federated.test ORDER BY col;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == '\n'.join(['col', '0', '1', '5', '']) + + code, (stdout, stderr) = mysql_server.exec_run(''' + mysql + -e "INSERT INTO mysql_federated.test VALUES (0), (1), (5);" + -e "SELECT * FROM mysql_federated.test ORDER BY col;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == '\n'.join(['col', '0', '0', '1', '1', '5', '5', '']) + + def test_python_client(server_address): + client = pymysql.connections.Connection(host=server_address, user='user_with_double_sha1', password='abacaba', database='default', port=server_port) + + with pytest.raises(pymysql.InternalError) as exc_info: + client.query('select name from tables') + + assert exc_info.value.args == (60, "Table default.tables doesn't exist.") + + cursor = client.cursor(pymysql.cursors.DictCursor) + cursor.execute("select 1 as a, 'тест' as b") + assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] + with pytest.raises(pymysql.InternalError) as exc_info: pymysql.connections.Connection(host=server_address, user='default', password='abacab', database='default', port=server_port) @@ -124,7 +190,7 @@ def test_python_client(server_address): cursor = client.cursor(pymysql.cursors.DictCursor) cursor.execute("select 1 as a, 'тест' as b") - assert cursor.fetchall() == [{'a': '1', 'b': 'тест'}] + assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] client.select_db('system') @@ -140,11 +206,14 @@ def test_python_client(server_address): cursor.execute("INSERT INTO table1 VALUES (1), (3)") cursor.execute("INSERT INTO table1 VALUES (1), (4)") cursor.execute("SELECT * FROM table1 ORDER BY a") - assert cursor.fetchall() == [{'a': '1'}, {'a': '1'}, {'a': '3'}, {'a': '4'}] + assert cursor.fetchall() == [{'a': 1}, {'a': 1}, {'a': 3}, {'a': 4}] def test_golang_client(server_address, golang_container): # type: (str, Container) -> None + with open(os.path.join(SCRIPT_DIR, 'clients', 'golang', '0.reference')) as fp: + reference = fp.read() + code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user 
default --password 123 --database ' 'abc'.format(host=server_address, port=server_port), demux=True) @@ -155,10 +224,12 @@ def test_golang_client(server_address, golang_container): 'default'.format(host=server_address, port=server_port), demux=True) assert code == 0 + assert stdout == reference - with open(os.path.join(SCRIPT_DIR, 'clients', 'golang', '0.reference')) as fp: - reference = fp.read() - assert stdout == reference + code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference def test_php_client(server_address, php_container): @@ -171,18 +242,80 @@ def test_php_client(server_address, php_container): assert code == 0 assert stdout == 'tables\n' + code, (stdout, stderr) = php_container.exec_run('php -f test.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == 'tables\n' + + code, (stdout, stderr) = php_container.exec_run('php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == 'tables\n' + def test_mysqljs_client(server_address, nodejs_container): - code, (_, stderr) = nodejs_container.exec_run('node test.js {host} {port} default 123'.format(host=server_address, port=server_port), demux=True) + code, (_, stderr) = nodejs_container.exec_run('node test.js {host} {port} user_with_sha256 abacaba'.format(host=server_address, port=server_port), demux=True) assert code == 1 assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' in stderr code, (_, stderr) = nodejs_container.exec_run('node test.js {host} {port} user_with_empty_password ""'.format(host=server_address, port=server_port), demux=True) - assert code == 1 - assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' 
in stderr + assert code == 0 code, (_, _) = nodejs_container.exec_run('node test.js {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) assert code == 0 code, (_, _) = nodejs_container.exec_run('node test.js {host} {port} user_with_empty_password 123'.format(host=server_address, port=server_port), demux=True) assert code == 1 + + +def test_types(server_address): + client = pymysql.connections.Connection(host=server_address, user='default', password='123', database='default', port=server_port) + + cursor = client.cursor(pymysql.cursors.DictCursor) + cursor.execute( + "select " + "toInt8(-pow(2, 7)) as Int8_column, " + "toUInt8(pow(2, 8) - 1) as UInt8_column, " + "toInt16(-pow(2, 15)) as Int16_column, " + "toUInt16(pow(2, 16) - 1) as UInt16_column, " + "toInt32(-pow(2, 31)) as Int32_column, " + "toUInt32(pow(2, 32) - 1) as UInt32_column, " + "toInt64('-9223372036854775808') as Int64_column, " # -2^63 + "toUInt64('18446744073709551615') as UInt64_column, " # 2^64 - 1 + "'тест' as String_column, " + "toFixedString('тест', 8) as FixedString_column, " + "toFloat32(1.5) as Float32_column, " + "toFloat64(1.5) as Float64_column, " + "toFloat32(NaN) as Float32_NaN_column, " + "-Inf as Float64_Inf_column, " + "toDate('2019-12-08') as Date_column, " + "toDate('1970-01-01') as Date_min_column, " + "toDate('1970-01-02') as Date_after_min_column, " + "toDateTime('2019-12-08 08:24:03') as DateTime_column" + ) + + result = cursor.fetchall()[0] + expected = [ + ('Int8_column', -2 ** 7), + ('UInt8_column', 2 ** 8 - 1), + ('Int16_column', -2 ** 15), + ('UInt16_column', 2 ** 16 - 1), + ('Int32_column', -2 ** 31), + ('UInt32_column', 2 ** 32 - 1), + ('Int64_column', -2 ** 63), + ('UInt64_column', 2 ** 64 - 1), + ('String_column', 'тест'), + ('FixedString_column', 'тест'), + ('Float32_column', 1.5), + ('Float64_column', 1.5), + ('Float32_NaN_column', float('nan')), + ('Float64_Inf_column', float('-inf')), + ('Date_column', datetime.date(2019, 12, 8)), + ('Date_min_column', '0000-00-00'), + ('Date_after_min_column', datetime.date(1970, 1, 2)), + ('DateTime_column', datetime.datetime(2019, 12, 8, 8, 24, 3)), + ] + + for key, value in expected: + if isinstance(value, float) and math.isnan(value): + assert math.isnan(result[key]) + else: + assert result[key] == value diff --git a/dbms/tests/integration/test_quota/__init__.py b/dbms/tests/integration/test_quota/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_quota/configs/users.d/quota.xml b/dbms/tests/integration/test_quota/configs/users.d/quota.xml new file mode 120000 index 00000000000..9b12dec9c53 --- /dev/null +++ b/dbms/tests/integration/test_quota/configs/users.d/quota.xml @@ -0,0 +1 @@ +../../normal_limits.xml \ No newline at end of file diff --git a/dbms/tests/integration/test_quota/configs/users.xml b/dbms/tests/integration/test_quota/configs/users.xml new file mode 100644 index 00000000000..15a5364449b --- /dev/null +++ b/dbms/tests/integration/test_quota/configs/users.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + ::/0 + + default + myQuota + true + + + diff --git a/dbms/tests/integration/test_quota/no_quotas.xml b/dbms/tests/integration/test_quota/no_quotas.xml new file mode 100644 index 00000000000..9aba4ac0914 --- /dev/null +++ b/dbms/tests/integration/test_quota/no_quotas.xml @@ -0,0 +1,3 @@ + + + diff --git a/dbms/tests/integration/test_quota/normal_limits.xml b/dbms/tests/integration/test_quota/normal_limits.xml new file mode 100644 index 
00000000000..b7c3a67b5cc --- /dev/null +++ b/dbms/tests/integration/test_quota/normal_limits.xml @@ -0,0 +1,17 @@ + + + + + + + 31556952 + + + 1000 + 0 + 1000 + 0 + + + + diff --git a/dbms/tests/integration/test_quota/simpliest.xml b/dbms/tests/integration/test_quota/simpliest.xml new file mode 100644 index 00000000000..6d51d68d8d9 --- /dev/null +++ b/dbms/tests/integration/test_quota/simpliest.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/dbms/tests/integration/test_quota/test.py b/dbms/tests/integration/test_quota/test.py new file mode 100644 index 00000000000..e7caaf5cd06 --- /dev/null +++ b/dbms/tests/integration/test_quota/test.py @@ -0,0 +1,251 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry +import os +import re +import time + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', + config_dir="configs") + +query_from_system_quotas = "SELECT * FROM system.quotas ORDER BY name"; + +query_from_system_quota_usage = "SELECT id, key, duration, "\ + "queries, errors, result_rows, result_bytes, read_rows, read_bytes "\ + "FROM system.quota_usage ORDER BY id, key, duration"; + +def system_quotas(): + return instance.query(query_from_system_quotas).rstrip('\n') + +def system_quota_usage(): + return instance.query(query_from_system_quota_usage).rstrip('\n') + + +def copy_quota_xml(local_file_name, reload_immediately = True): + script_dir = os.path.dirname(os.path.realpath(__file__)) + instance.copy_file_to_container(os.path.join(script_dir, local_file_name), '/etc/clickhouse-server/users.d/quota.xml') + if reload_immediately: + instance.query("SYSTEM RELOAD CONFIG") + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + + instance.query("CREATE TABLE test_table(x UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query("INSERT INTO test_table SELECT number FROM numbers(50)") + + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def reset_quotas_and_usage_info(): + try: + yield + finally: + instance.query("DROP QUOTA IF EXISTS qA, qB") + copy_quota_xml('simpliest.xml') # To reset usage info. + copy_quota_xml('normal_limits.xml') + + +def test_quota_from_users_xml(): + assert instance.query("SELECT currentQuota()") == "myQuota\n" + assert instance.query("SELECT currentQuotaID()") == "e651da9c-a748-8703-061a-7e5e5096dae7\n" + assert instance.query("SELECT currentQuotaKey()") == "default\n" + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" + + instance.query("SELECT COUNT() from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t51\t208\t50\t200" + + +def test_simpliest_quota(): + # Simpliest quota doesn't even track usage. 
+ copy_quota_xml('simpliest.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + +def test_tracking_quota(): + # Now we're tracking usage. + copy_quota_xml('tracking.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" + + instance.query("SELECT COUNT() from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t51\t208\t50\t200" + + +def test_exceed_quota(): + # Change quota, now the limits are tiny so we will exceed the quota. + copy_quota_xml('tiny_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1]\t[1]\t[1]\t[0]\t[1]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + assert re.search("Quota.*has\ been\ exceeded", instance.query_and_get_error("SELECT * from test_table")) + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t1\t0\t0\t50\t0" + + # Change quota, now the limits are enough to execute queries. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t1\t0\t0\t50\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t1\t50\t200\t100\t200" + + +def test_add_remove_interval(): + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + # Add interval. + copy_quota_xml('two_intervals.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952,63113904]\t[0,1]\t[1000,0]\t[0,0]\t[0,0]\t[0,30000]\t[1000,0]\t[0,20000]\t[0,120]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0\n"\ + "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t63113904\t0\t0\t0\t0\t0\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200\n"\ + "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t63113904\t1\t0\t50\t200\t50\t200" + + # Remove interval. 
+ copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t100\t400\t100\t400" + + # Remove all intervals. + copy_quota_xml('simpliest.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + # Add one interval back. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + +def test_add_remove_quota(): + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + # Add quota. + copy_quota_xml('two_quotas.xml') + assert system_quotas() ==\ + "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]\n"\ + "myQuota2\t4590510c-4d13-bf21-ec8a-c2187b092e73\tusers.xml\tclient key or user name\t[]\t0\t[]\t[3600,2629746]\t[1,0]\t[0,0]\t[0,0]\t[4000,0]\t[400000,0]\t[4000,0]\t[400000,0]\t[60,1800]" + + # Drop quota. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + + # Drop all quotas. + copy_quota_xml('no_quotas.xml') + assert system_quotas() == "" + assert system_quota_usage() == "" + + # Add one quota back. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + +def test_reload_users_xml_by_timer(): + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + + time.sleep(1) # The modification time of the 'quota.xml' file should be different, + # because config files are reloaded by timer only when the modification time is changed. 
+ copy_quota_xml('tiny_limits.xml', reload_immediately=False) + assert_eq_with_retry(instance, query_from_system_quotas, "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t0\t[]\t[31556952]\t[0]\t[1]\t[1]\t[1]\t[0]\t[1]\t[0]\t[0]") + + +def test_dcl_introspection(): + assert instance.query("SHOW QUOTAS") == "myQuota\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000 TO default\n" + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0/1000 errors=0 result_rows=0 result_bytes=0 read_rows=0/1000 read_bytes=0 execution_time=0" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE CURRENT")) + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE ALL")) + + instance.query("SELECT * from test_table") + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + # Add interval. + copy_quota_xml('two_intervals.xml') + assert instance.query("SHOW QUOTAS") == "myQuota\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000, FOR RANDOMIZED INTERVAL 2 YEAR MAX RESULT BYTES = 30000, MAX READ BYTES = 20000, MAX EXECUTION TIME = 120 TO default\n" + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*\n"\ + "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0/30000 read_rows=0 read_bytes=0/20000 execution_time=0/120" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + # Drop interval, add quota. 
+ copy_quota_xml('two_quotas.xml') + assert instance.query("SHOW QUOTAS") == "myQuota\nmyQuota2\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000 TO default\n" + assert instance.query("SHOW CREATE QUOTA myQuota2") == "CREATE QUOTA myQuota2 KEYED BY \\'client key or user name\\' FOR RANDOMIZED INTERVAL 1 HOUR MAX RESULT ROWS = 4000, MAX RESULT BYTES = 400000, MAX READ ROWS = 4000, MAX READ BYTES = 400000, MAX EXECUTION TIME = 60, FOR INTERVAL 1 MONTH MAX EXECUTION TIME = 1800\n" + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + +def test_dcl_management(): + copy_quota_xml('no_quotas.xml') + assert instance.query("SHOW QUOTAS") == "" + assert instance.query("SHOW QUOTA USAGE") == "" + + instance.query("CREATE QUOTA qA FOR INTERVAL 15 MONTH SET MAX QUERIES = 123 TO CURRENT_USER") + assert instance.query("SHOW QUOTAS") == "qA\n" + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 5 QUARTER MAX QUERIES = 123 TO default\n" + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0/123 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("SELECT * from test_table") + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=1/123 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES = 321, MAX ERRORS = 10, FOR INTERVAL 0.5 HOUR MAX EXECUTION TIME = 0.5") + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 30 MINUTE MAX EXECUTION TIME = 0.5, FOR INTERVAL 5 QUARTER MAX QUERIES = 321, MAX ERRORS = 10 TO default\n" + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*/0.5\n"\ + "qA key=\\\\'\\\\' interval=\[.*\] queries=1/321 errors=0/10 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH UNSET TRACKING, FOR RANDOMIZED INTERVAL 16 MONTH SET TRACKING, FOR INTERVAL 1800 SECOND UNSET TRACKING") + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING TO default\n" + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("SELECT * from test_table") + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=1 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("ALTER QUOTA qA RENAME TO qB") + assert instance.query("SHOW CREATE QUOTA qB") == "CREATE QUOTA qB KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING TO default\n" + expected_usage = "qB key=\\\\'\\\\' interval=\[.*\] queries=1 
errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("DROP QUOTA qB") + assert instance.query("SHOW QUOTAS") == "" + assert instance.query("SHOW QUOTA USAGE") == "" + + +def test_users_xml_is_readonly(): + assert re.search("storage is readonly", instance.query_and_get_error("DROP QUOTA myQuota")) diff --git a/dbms/tests/integration/test_quota/tiny_limits.xml b/dbms/tests/integration/test_quota/tiny_limits.xml new file mode 100644 index 00000000000..3ab8858738a --- /dev/null +++ b/dbms/tests/integration/test_quota/tiny_limits.xml @@ -0,0 +1,17 @@ + + + + + + + 31556952 + + + 1 + 1 + 1 + 1 + + + + diff --git a/dbms/tests/integration/test_quota/tracking.xml b/dbms/tests/integration/test_quota/tracking.xml new file mode 100644 index 00000000000..47e12bf8005 --- /dev/null +++ b/dbms/tests/integration/test_quota/tracking.xml @@ -0,0 +1,17 @@ + + + + + + + 31556952 + + + 0 + 0 + 0 + 0 + + + + diff --git a/dbms/tests/integration/test_quota/two_intervals.xml b/dbms/tests/integration/test_quota/two_intervals.xml new file mode 100644 index 00000000000..d0de605b895 --- /dev/null +++ b/dbms/tests/integration/test_quota/two_intervals.xml @@ -0,0 +1,20 @@ + + + + + + 31556952 + 1000 + 1000 + + + + true + 63113904 + 20000 + 30000 + 120 + + + + diff --git a/dbms/tests/integration/test_quota/two_quotas.xml b/dbms/tests/integration/test_quota/two_quotas.xml new file mode 100644 index 00000000000..c08cc82aca7 --- /dev/null +++ b/dbms/tests/integration/test_quota/two_quotas.xml @@ -0,0 +1,29 @@ + + + + + + 31556952 + 1000 + 1000 + + + + + + + true + 3600 + 4000 + 4000 + 400000 + 400000 + 60 + + + 2629746 + 1800 + + + + diff --git a/dbms/tests/integration/test_read_temporary_tables_on_failure/__init__.py b/dbms/tests/integration/test_read_temporary_tables_on_failure/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_read_temporary_tables_on_failure/test.py b/dbms/tests/integration/test_read_temporary_tables_on_failure/test.py new file mode 100644 index 00000000000..ad1a41b8979 --- /dev/null +++ b/dbms/tests/integration/test_read_temporary_tables_on_failure/test.py @@ -0,0 +1,26 @@ +import pytest +import time + +from helpers.cluster import ClickHouseCluster +from helpers.client import QueryTimeoutExceedException, QueryRuntimeException + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance('node') + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + finally: + cluster.shutdown() + +def test_different_versions(start_cluster): + with pytest.raises(QueryTimeoutExceedException): + node.query("SELECT sleep(3)", timeout=1) + with pytest.raises(QueryRuntimeException): + node.query("SELECT 1", settings={'max_concurrent_queries_for_user': 1}) + assert node.contains_in_log('Too many simultaneous queries for user') + assert not node.contains_in_log('Unknown packet') diff --git a/dbms/tests/integration/test_relative_filepath/__init__.py b/dbms/tests/integration/test_relative_filepath/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_relative_filepath/configs/config.xml b/dbms/tests/integration/test_relative_filepath/configs/config.xml new file mode 100644 index 00000000000..dbc339c4741 --- /dev/null +++ b/dbms/tests/integration/test_relative_filepath/configs/config.xml @@ -0,0 +1,4 @@ + + + user_files + diff --git 
a/dbms/tests/integration/test_relative_filepath/test.py b/dbms/tests/integration/test_relative_filepath/test.py new file mode 100644 index 00000000000..a8e2341a3cd --- /dev/null +++ b/dbms/tests/integration/test_relative_filepath/test.py @@ -0,0 +1,36 @@ +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance('node', main_configs=['configs/config.xml']) +path_to_userfiles_from_defaut_config = "user_files" + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + +def test_filepath(start_cluster): + # 2 rows data + some_data = "Test\t111.222\nData\t333.444" + + node.exec_in_container(['bash', '-c', 'mkdir -p {}'.format( + path_to_userfiles_from_defaut_config + )], privileged=True, user='root') + + node.exec_in_container(['bash', '-c', 'echo "{}" > {}'.format( + some_data, + path_to_userfiles_from_defaut_config + "/relative_user_file_test" + )], privileged=True, user='root') + + test_requests = [("relative_user_file_test", "2"), + ("../" + path_to_userfiles_from_defaut_config + "/relative_user_file_test", "2")] + + for pattern, value in test_requests: + assert node.query(''' + select count() from file('{}', 'TSV', 'text String, number Float64') + '''.format(pattern)) == '{}\n'.format(value) diff --git a/dbms/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml b/dbms/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml new file mode 100644 index 00000000000..98a9d547705 --- /dev/null +++ b/dbms/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml @@ -0,0 +1,5 @@ + + + ^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$ + + diff --git a/dbms/tests/integration/test_storage_s3/test.py b/dbms/tests/integration/test_storage_s3/test.py index 1db472e3019..6fbf25f8fa8 100644 --- a/dbms/tests/integration/test_storage_s3/test.py +++ b/dbms/tests/integration/test_storage_s3/test.py @@ -5,6 +5,9 @@ import pytest from helpers.cluster import ClickHouseCluster, ClickHouseInstance +import helpers.client + + logging.getLogger().setLevel(logging.INFO) logging.getLogger().addHandler(logging.StreamHandler()) @@ -53,12 +56,18 @@ def prepare_s3_bucket(cluster): minio_client.set_bucket_policy(cluster.minio_bucket, json.dumps(bucket_read_write_policy)) + cluster.minio_restricted_bucket = "{}-with-auth".format(cluster.minio_bucket) + if minio_client.bucket_exists(cluster.minio_restricted_bucket): + minio_client.remove_bucket(cluster.minio_restricted_bucket) + + minio_client.make_bucket(cluster.minio_restricted_bucket) + # Returns content of given S3 file as string. -def get_s3_file_content(cluster, filename): +def get_s3_file_content(cluster, bucket, filename): # type: (ClickHouseCluster, str) -> str - data = cluster.minio_client.get_object(cluster.minio_bucket, filename) + data = cluster.minio_client.get_object(bucket, filename) data_str = "" for chunk in data.stream(): data_str += chunk @@ -77,6 +86,7 @@ def get_nginx_access_logs(): def cluster(): try: cluster = ClickHouseCluster(__file__) + cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"], with_minio=True) cluster.add_instance("dummy", with_minio=True) logging.info("Starting cluster...") cluster.start() @@ -101,53 +111,76 @@ def run_query(instance, query, stdin=None, settings=None): # Test simple put. 
-def test_put(cluster): +@pytest.mark.parametrize("maybe_auth,positive", [ + ("",True), + ("'minio','minio123',",True), + ("'wrongid','wrongkey',",False) +]) +def test_put(cluster, maybe_auth, positive): # type: (ClickHouseCluster) -> None + bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket instance = cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)" values_csv = "1,2,3\n3,2,1\n78,43,45\n" filename = "test.csv" - put_query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( - cluster.minio_host, cluster.minio_port, cluster.minio_bucket, filename, table_format, values) - run_query(instance, put_query) + put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') values {}".format( + cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format, values) - assert values_csv == get_s3_file_content(cluster, filename) + try: + run_query(instance, put_query) + except helpers.client.QueryRuntimeException: + assert not positive + else: + assert positive + assert values_csv == get_s3_file_content(cluster, bucket, filename) # Test put values in CSV format. -def test_put_csv(cluster): +@pytest.mark.parametrize("maybe_auth,positive", [ + ("",True), + ("'minio','minio123',",True), + ("'wrongid','wrongkey',",False) +]) +def test_put_csv(cluster, maybe_auth, positive): # type: (ClickHouseCluster) -> None + bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket instance = cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" filename = "test.csv" - put_query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') format CSV".format( - cluster.minio_host, cluster.minio_port, cluster.minio_bucket, filename, table_format) + put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format( + cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format) csv_data = "8,9,16\n11,18,13\n22,14,2\n" - run_query(instance, put_query, stdin=csv_data) - assert csv_data == get_s3_file_content(cluster, filename) + try: + run_query(instance, put_query, stdin=csv_data) + except helpers.client.QueryRuntimeException: + assert not positive + else: + assert positive + assert csv_data == get_s3_file_content(cluster, bucket, filename) # Test put and get with S3 server redirect. 
def test_put_get_with_redirect(cluster): # type: (ClickHouseCluster) -> None + bucket = cluster.minio_bucket instance = cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)" values_csv = "1,1,1\n1,1,1\n11,11,11\n" filename = "test.csv" query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( - cluster.minio_redirect_host, cluster.minio_redirect_port, cluster.minio_bucket, filename, table_format, values) + cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format, values) run_query(instance, query) - assert values_csv == get_s3_file_content(cluster, filename) + assert values_csv == get_s3_file_content(cluster, bucket, filename) query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format( - cluster.minio_redirect_host, cluster.minio_redirect_port, cluster.minio_bucket, filename, table_format) + cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format) stdout = run_query(instance, query) assert list(map(str.split, stdout.splitlines())) == [ @@ -158,9 +191,15 @@ def test_put_get_with_redirect(cluster): # Test multipart put. -def test_multipart_put(cluster): +@pytest.mark.parametrize("maybe_auth,positive", [ + ("",True), + ("'minio','minio123',",True), + ("'wrongid','wrongkey',",False) +]) +def test_multipart_put(cluster, maybe_auth, positive): # type: (ClickHouseCluster) -> None + bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket instance = cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" @@ -178,14 +217,31 @@ def test_multipart_put(cluster): assert len(csv_data) > min_part_size_bytes filename = "test_multipart.csv" - put_query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') format CSV".format( - cluster.minio_redirect_host, cluster.minio_redirect_port, cluster.minio_bucket, filename, table_format) + put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format( + cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format) - run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes}) + try: + run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes}) + except helpers.client.QueryRuntimeException: + assert not positive + else: + assert positive - # Use Nginx access logs to count number of parts uploaded to Minio. - nginx_logs = get_nginx_access_logs() - uploaded_parts = filter(lambda log_line: log_line.find(filename) >= 0 and log_line.find("PUT") >= 0, nginx_logs) - assert uploaded_parts > 1 + # Use Nginx access logs to count number of parts uploaded to Minio. 
+    nginx_logs = get_nginx_access_logs()
+    uploaded_parts = list(filter(lambda log_line: log_line.find(filename) >= 0 and log_line.find("PUT") >= 0, nginx_logs))
+    assert len(uploaded_parts) > 1

-    assert csv_data == get_s3_file_content(cluster, filename)
+    assert csv_data == get_s3_file_content(cluster, bucket, filename)
+
+
+def test_remote_host_filter(cluster):
+    instance = cluster.instances["restricted_dummy"]
+    format = "column1 UInt32, column2 UInt32, column3 UInt32"
+
+    query = "select *, column1*column2*column3 from s3('http://{}:{}/', 'CSV', '{}')".format("invalid_host", cluster.minio_redirect_port, format)
+    assert "not allowed in config.xml" in instance.query_and_get_error(query)
+
+    other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
+    query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format("invalid_host", cluster.minio_port, cluster.minio_bucket, format, other_values)
+    assert "not allowed in config.xml" in instance.query_and_get_error(query)
diff --git a/dbms/tests/integration/test_system_merges/__init__.py b/dbms/tests/integration/test_system_merges/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dbms/tests/integration/test_system_merges/configs/config.d/cluster.xml b/dbms/tests/integration/test_system_merges/configs/config.d/cluster.xml
new file mode 100644
index 00000000000..ec7c9b8e4f8
--- /dev/null
+++ b/dbms/tests/integration/test_system_merges/configs/config.d/cluster.xml
@@ -0,0 +1,16 @@
+ + + + + + node1 + 9000 + + + node2 + 9000 + + + + + \ No newline at end of file
diff --git a/dbms/tests/integration/test_system_merges/configs/logs_config.xml b/dbms/tests/integration/test_system_merges/configs/logs_config.xml
new file mode 100644
index 00000000000..bdf1bbc11c1
--- /dev/null
+++ b/dbms/tests/integration/test_system_merges/configs/logs_config.xml
@@ -0,0 +1,17 @@
+<yandex>
+    <shutdown_wait_unfinished>3</shutdown_wait_unfinished>
+    <logger>
+        <level>trace</level>
+        <log>/var/log/clickhouse-server/log.log</log>
+        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
+        <size>1000M</size>
+        <count>10</count>
+        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
+        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
+    </logger>
+    <part_log>
+        <database>system</database>
+        <table>part_log</table>
+        <flush_interval_milliseconds>500</flush_interval_milliseconds>
+    </part_log>
+</yandex>
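(Editor's note, not part of the original patch: the integration test added below polls the `system.merges` system table while a merge or mutation is in flight. A minimal hedged sketch of the same check, assuming a running server and that `system.merges` exposes the part-name/part-path columns the test selects:)

-- Show merges/mutations currently running for one table; an empty result
-- means nothing is in flight, which is what the test asserts at the end.
SELECT database, table, num_parts, source_part_names, result_part_name, is_mutation
FROM system.merges
WHERE table = 'test_merge_simple';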
diff --git a/dbms/tests/integration/test_system_merges/test.py b/dbms/tests/integration/test_system_merges/test.py
new file mode 100644
index 00000000000..7b638ce05c7
--- /dev/null
+++ b/dbms/tests/integration/test_system_merges/test.py
@@ -0,0 +1,160 @@
+import pytest
+import threading
+import time
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+
+node1 = cluster.add_instance('node1',
+                             config_dir='configs',
+                             main_configs=['configs/logs_config.xml'],
+                             with_zookeeper=True,
+                             macros={"shard": 0, "replica": 1})
+
+node2 = cluster.add_instance('node2',
+                             config_dir='configs',
+                             main_configs=['configs/logs_config.xml'],
+                             with_zookeeper=True,
+                             macros={"shard": 0, "replica": 2})
+
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+def split_tsv(data):
+    return [x.split("\t") for x in data.splitlines()]
+
+
+@pytest.mark.parametrize("replicated", [
+    "",
+    "replicated"
+])
+def test_merge_simple(started_cluster, replicated):
+    try:
+        clickhouse_path = "/var/lib/clickhouse"
+        name = "test_merge_simple"
+        nodes = [node1, node2] if replicated else [node1]
+        engine = "ReplicatedMergeTree('/clickhouse/test_merge_simple', '{replica}')" if replicated else "MergeTree()"
+        node_check = nodes[-1]
+        starting_block = 0 if replicated else 1
+
+        for node in nodes:
+            node.query("""
+                CREATE TABLE {name}
+                (
+                    `a` Int64
+                )
+                ENGINE = {engine}
+                ORDER BY sleep(2)
+            """.format(engine=engine, name=name))
+
+        node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
+        node1.query("INSERT INTO {name} VALUES (2)".format(name=name))
+        node1.query("INSERT INTO {name} VALUES (3)".format(name=name))
+
+        parts = ["all_{}_{}_0".format(x, x) for x in range(starting_block, starting_block+3)]
+        result_part = "all_{}_{}_1".format(starting_block, starting_block+2)
+
+        def optimize():
+            node1.query("OPTIMIZE TABLE {name}".format(name=name))
+
+        wait = threading.Thread(target=time.sleep, args=(5,))
+        wait.start()
+        t = threading.Thread(target=optimize)
+        t.start()
+
+        time.sleep(1)
+        assert split_tsv(node_check.query("""
+            SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation
+            FROM system.merges
+            WHERE table = '{name}'
+        """.format(name=name))) == [
+            [
+                "default",
+                name,
+                "3",
+                "['{}','{}','{}']".format(*parts),
+                "['{clickhouse}/data/default/{name}/{}/','{clickhouse}/data/default/{name}/{}/','{clickhouse}/data/default/{name}/{}/']".format(*parts, clickhouse=clickhouse_path, name=name),
+                result_part,
+                "{clickhouse}/data/default/{name}/{}/".format(result_part, clickhouse=clickhouse_path, name=name),
+                "all",
+                "0"
+            ]
+        ]
+        t.join()
+        wait.join()
+
+        assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=name)) == ""
+
+    finally:
+        for node in nodes:
+            node.query("DROP TABLE {name}".format(name=name))
+
+
+@pytest.mark.parametrize("replicated", [
+    "",
+    "replicated"
+])
+def test_mutation_simple(started_cluster, replicated):
+    try:
+        clickhouse_path = "/var/lib/clickhouse"
+        name = "test_mutation_simple"
+        nodes = [node1, node2] if replicated else [node1]
+        engine = "ReplicatedMergeTree('/clickhouse/test_mutation_simple', '{replica}')" if replicated else "MergeTree()"
+        node_check = nodes[-1]
+        starting_block = 0 if replicated else 1
+
+        for node in nodes:
+            node.query("""
+                CREATE TABLE {name}
+                (
+                    `a` Int64
+                )
+                ENGINE = {engine}
+                ORDER BY tuple()
+            """.format(engine=engine, name=name))
+
+        node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
+        part = "all_{}_{}_0".format(starting_block, starting_block)
+        result_part = "all_{}_{}_0_{}".format(starting_block, starting_block, starting_block+1)
+
+        def alter():
+            node1.query("ALTER TABLE {name} UPDATE a = 42 WHERE sleep(2) OR 1".format(name=name))
+
+        t = threading.Thread(target=alter)
+        t.start()
+
+        time.sleep(1)
+        assert split_tsv(node_check.query("""
+            SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation
+            FROM system.merges
+            WHERE table = '{name}'
+        """.format(name=name))) == [
+            [
+                "default",
+                name,
+                "1",
+                "['{}']".format(part),
+                "['{clickhouse}/data/default/{name}/{}/']".format(part, clickhouse=clickhouse_path, name=name),
+                result_part,
+                "{clickhouse}/data/default/{name}/{}/".format(result_part, clickhouse=clickhouse_path, name=name),
+                "all",
+                "1"
+            ],
+        ]
+        t.join()
+
+        time.sleep(1.5)
+
+        assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=name)) == ""
+
+    finally:
+        for node in nodes:
+            node.query("DROP TABLE {name}".format(name=name))
diff --git a/dbms/tests/performance/README.md b/dbms/tests/performance/README.md
index 0a78fe481b2..ecda08a80b1 100644
--- a/dbms/tests/performance/README.md
+++ b/dbms/tests/performance/README.md
@@ -22,6 +22,8 @@ You can use `substitutions`, `create`, `fill` and `drop` queries to prepare test
 Take into account that these tests will run in CI, which consists of machines with 56 cores and 512 GB of RAM. Queries will be executed much faster than on a local laptop.
 
+If your test takes more than 10 minutes, please add the tag `long` so that the long tests can be skipped while the rest of the suite is still run.
+ ### How to run performance test You have to run clickhouse-server and after you can start testing: diff --git a/dbms/tests/performance/and_function.xml b/dbms/tests/performance/and_function.xml new file mode 100644 index 00000000000..08fd07ea7e5 --- /dev/null +++ b/dbms/tests/performance/and_function.xml @@ -0,0 +1,22 @@ + + loop + + + + 3 + 10000 + + + 5 + 60000 + + + + + + + + select count() from numbers(10000000) where number != 96594 AND number != 18511 AND number != 98085 AND number != 84177 AND number != 70314 AND number != 28083 AND number != 54202 AND number != 66522 AND number != 66939 AND number != 99469 AND number != 65776 AND number != 22876 AND number != 42151 AND number != 19924 AND number != 66681 AND number != 63022 AND number != 17487 AND number != 83914 AND number != 59754 AND number != 968 AND number != 73334 AND number != 68569 AND number != 49853 AND number != 33155 AND number != 31777 AND number != 99698 AND number != 26708 AND number != 76409 AND number != 42191 AND number != 55397 AND number != 25724 AND number != 39170 AND number != 22728 AND number != 98238 AND number != 86052 AND number != 12756 AND number != 13948 AND number != 57774 AND number != 82511 AND number != 11337 AND number != 23506 AND number != 11875 AND number != 58536 AND number != 56919 AND number != 25986 AND number != 80710 AND number != 61797 AND number != 99244 AND number != 11665 AND number != 15758 AND number != 82899 AND number != 63150 AND number != 7198 AND number != 40071 AND number != 46310 AND number != 78488 AND number != 9273 AND number != 91878 AND number != 57904 AND number != 53941 AND number != 75675 AND number != 12093 AND number != 50090 AND number != 59675 AND number != 41632 AND number != 81448 AND number != 46821 AND number != 51919 AND number != 49028 AND number != 71059 AND number != 15673 AND number != 6132 AND number != 15473 AND number != 32527 AND number != 63842 AND number != 33121 AND number != 53271 AND number != 86033 AND number != 96807 AND number != 4791 AND number != 80089 AND number != 51616 AND number != 46311 AND number != 82844 AND number != 59353 AND number != 63538 AND number != 64857 AND number != 58471 AND number != 29870 AND number != 80209 AND number != 61000 AND number != 75991 AND number != 44506 AND number != 11283 AND number != 6335 AND number != 73502 AND number != 22354 AND number != 72816 AND number != 66399 AND number != 61703 + + select count() from numbers(10000000) where number != 96594 AND number != 18511 AND number != 98085 AND number != 84177 AND number != 70314 AND number != 28083 AND number != 54202 AND number != 66522 AND number != 66939 AND number != 99469 + diff --git a/dbms/tests/performance/collations.xml b/dbms/tests/performance/collations.xml new file mode 100644 index 00000000000..9bc48d76bce --- /dev/null +++ b/dbms/tests/performance/collations.xml @@ -0,0 +1,25 @@ + + loop + + + + 5 + 10000 + + + 100 + 60000 + + + + + + + + + test.hits + + + SELECT Title FROM test.hits ORDER BY Title DESC LIMIT 1000, 10 + SELECT Title FROM test.hits ORDER BY Title DESC COLLATE 'tr' LIMIT 1000, 10 + diff --git a/dbms/tests/performance/general_purpose_hashes_on_UUID.xml b/dbms/tests/performance/general_purpose_hashes_on_UUID.xml new file mode 100644 index 00000000000..23e00909bbe --- /dev/null +++ b/dbms/tests/performance/general_purpose_hashes_on_UUID.xml @@ -0,0 +1,54 @@ + + loop + + + + 3 + 10000 + + + 5 + 60000 + + + + + + + + + + hash_func + + cityHash64 + farmHash64 + metroHash64 + murmurHash2_32 + murmurHash2_64 + murmurHash3_32 + 
murmurHash3_64 + javaHash + hiveHash + xxHash32 + xxHash64 + + + + value + + toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') + toDecimal128(number, 23) + generateUUIDv4() + + + + table + + numbers(1000000) + numbers_mt(10000000) + + + + + SELECT count() from {table} where not ignore({hash_func}({value})) + diff --git a/dbms/tests/performance/grear_circle_dist.xml b/dbms/tests/performance/great_circle_dist.xml similarity index 57% rename from dbms/tests/performance/grear_circle_dist.xml rename to dbms/tests/performance/great_circle_dist.xml index bb26605bd89..99382543d60 100644 --- a/dbms/tests/performance/grear_circle_dist.xml +++ b/dbms/tests/performance/great_circle_dist.xml @@ -9,7 +9,8 @@ - SELECT count() FROM system.numbers WHERE NOT ignore(greatCircleDistance((rand() % 360) * 1. - 180, (number % 150) * 1.2 - 90, (number % 360) + toFloat64(rand()) / 4294967296 - 180, (rand() % 180) * 1. - 90)) + SELECT count() FROM system.numbers WHERE NOT ignore(greatCircleDistance((rand(1) % 360) * 1. - 180, (number % 150) * 1.2 - 90, (number % 360) + toFloat64(rand(2)) / 4294967296 - 180, (rand(3) % 180) * 1. - 90)) - SELECT count() FROM system.numbers WHERE NOT ignore(greatCircleDistance(55. + toFloat64(rand()) / 4294967296, 37. + toFloat64(rand()) / 4294967296, 55. + toFloat64(rand()) / 4294967296, 37. + toFloat64(rand()) / 4294967296)) + SELECT count() FROM system.numbers WHERE NOT ignore(greatCircleDistance(55. + toFloat64(rand(1)) / 4294967296, 37. + toFloat64(rand(2)) / 4294967296, 55. + toFloat64(rand(3)) / 4294967296, 37. + toFloat64(rand(4)) / 4294967296)) + diff --git a/dbms/tests/performance/parse_engine_file.xml b/dbms/tests/performance/parse_engine_file.xml index 6bd4af0b45b..8308d8f049f 100644 --- a/dbms/tests/performance/parse_engine_file.xml +++ b/dbms/tests/performance/parse_engine_file.xml @@ -32,6 +32,8 @@ CSVWithNames Values JSONEachRow + JSONCompactEachRow + JSONCompactEachRowWithNamesAndTypes TSKV RowBinary Native diff --git a/dbms/tests/performance/select_format.xml b/dbms/tests/performance/select_format.xml index c5ad1acd396..55ab7b2d458 100644 --- a/dbms/tests/performance/select_format.xml +++ b/dbms/tests/performance/select_format.xml @@ -34,6 +34,7 @@ JSON JSONCompact JSONEachRow + JSONCompactEachRow TSKV Pretty PrettyCompact diff --git a/dbms/tests/queries/0_stateless/00105_shard_collations.reference b/dbms/tests/queries/0_stateless/00105_shard_collations.reference index 3ff09ff2f2d..3780e7deb0c 100644 --- a/dbms/tests/queries/0_stateless/00105_shard_collations.reference +++ b/dbms/tests/queries/0_stateless/00105_shard_collations.reference @@ -1,15 +1,18 @@ +Русский (default) Ё А Я а я ё +Русский (ru) а А ё Ё я Я +Русский (ru distributed) а а А @@ -22,6 +25,7 @@ я Я Я +Türk (default) A A B @@ -132,6 +136,7 @@ z ı Ş ş +Türk (tr) a a A @@ -242,9 +247,62 @@ z z Z Z +english (default) +A +Q +Z +c +e +english (en_US) +A +c +e +Q +Z +english (en) +A +c +e +Q +Z +español (default) +F +J +z +Ñ +español (es) +F +J +Ñ +z +Український (default) +І +Б +ї +ґ +Український (uk) +Б +ґ +І +ї +Русский (ru group by) а 1 А 4 ё 3 Ё 6 я 2 Я 5 +ζ +0 +1 +0 +1 +10 +2 +3 +4 +5 +6 +7 +8 +9 diff --git a/dbms/tests/queries/0_stateless/00105_shard_collations.sql b/dbms/tests/queries/0_stateless/00105_shard_collations.sql index 174992419e2..a73c441cc19 100644 --- a/dbms/tests/queries/0_stateless/00105_shard_collations.sql +++ b/dbms/tests/queries/0_stateless/00105_shard_collations.sql @@ -1,6 +1,49 @@ +SELECT 'Русский (default)'; SELECT arrayJoin(['а', 'я', 'ё', 'А', 'Я', 'Ё']) AS x ORDER BY x; + 
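+-- (Editor's note, grounded in the reference output above: the default ORDER BY
+-- compares raw bytes, so uppercase Cyrillic sorts before lowercase and ё lands
+-- last; with COLLATE 'ru' below, upper- and lowercase pairs sort together and
+-- ё falls between а and я, as a locale-aware comparison places it.)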
+SELECT 'Русский (ru)'; SELECT arrayJoin(['а', 'я', 'ё', 'А', 'Я', 'Ё']) AS x ORDER BY x COLLATE 'ru'; + +SELECT 'Русский (ru distributed)'; SELECT arrayJoin(['а', 'я', 'ё', 'А', 'Я', 'Ё']) AS x FROM remote('127.0.0.{2,3}', system, one) ORDER BY x COLLATE 'ru'; + +SELECT 'Türk (default)'; SELECT arrayJoin(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'ç', 'd', 'e', 'f', 'g', 'ğ', 'h', 'ı', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'ö', 'p', 'r', 's', 'ş', 't', 'u', 'ü', 'v', 'y', 'z', 'A', 'B', 'C', 'Ç', 'D', 'E', 'F', 'G', 'Ğ', 'H', 'I', 'İ', 'J', 'K', 'L', 'M', 'N', 'O', 'Ö', 'P', 'R', 'S', 'Ş', 'T', 'U', 'Ü', 'V', 'Y', 'Z']) AS x ORDER BY x; + +SELECT 'Türk (tr)'; SELECT arrayJoin(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'ç', 'd', 'e', 'f', 'g', 'ğ', 'h', 'ı', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'ö', 'p', 'r', 's', 'ş', 't', 'u', 'ü', 'v', 'y', 'z', 'A', 'B', 'C', 'Ç', 'D', 'E', 'F', 'G', 'Ğ', 'H', 'I', 'İ', 'J', 'K', 'L', 'M', 'N', 'O', 'Ö', 'P', 'R', 'S', 'Ş', 'T', 'U', 'Ü', 'V', 'Y', 'Z']) AS x ORDER BY x COLLATE 'tr'; + +SELECT 'english (default)'; +SELECT arrayJoin(['A', 'c', 'Z', 'Q', 'e']) AS x ORDER BY x; +SELECT 'english (en_US)'; +SELECT arrayJoin(['A', 'c', 'Z', 'Q', 'e']) AS x ORDER BY x COLLATE 'en_US'; +SELECT 'english (en)'; +SELECT arrayJoin(['A', 'c', 'Z', 'Q', 'e']) AS x ORDER BY x COLLATE 'en'; + +SELECT 'español (default)'; +SELECT arrayJoin(['F', 'z', 'J', 'Ñ']) as x ORDER BY x; +SELECT 'español (es)'; +SELECT arrayJoin(['F', 'z', 'J', 'Ñ']) as x ORDER BY x COLLATE 'es'; + +SELECT 'Український (default)'; +SELECT arrayJoin(['ґ', 'ї', 'І', 'Б']) as x ORDER BY x; +SELECT 'Український (uk)'; +SELECT arrayJoin(['ґ', 'ї', 'І', 'Б']) as x ORDER BY x COLLATE 'uk'; + +SELECT 'Русский (ru group by)'; SELECT x, n FROM (SELECT ['а', 'я', 'ё', 'А', 'Я', 'Ё'] AS arr) ARRAY JOIN arr AS x, arrayEnumerate(arr) AS n ORDER BY x COLLATE 'ru', n; + +--- Const expression +SELECT 'ζ' as x ORDER BY x COLLATE 'el'; + +-- check order by const with collation +SELECT number FROM numbers(2) ORDER BY 'x' COLLATE 'el'; + +-- check const and non const columns in order +SELECT number FROM numbers(11) ORDER BY 'x', toString(number), 'y' COLLATE 'el'; + +--- Trash locales +SELECT '' as x ORDER BY x COLLATE 'qq'; --{serverError 186} +SELECT '' as x ORDER BY x COLLATE 'qwe'; --{serverError 186} +SELECT '' as x ORDER BY x COLLATE 'some_non_existing_locale'; --{serverError 186} +SELECT '' as x ORDER BY x COLLATE 'ру'; --{serverError 186} diff --git a/dbms/tests/queries/0_stateless/00203_full_join.reference b/dbms/tests/queries/0_stateless/00203_full_join.reference index eedd5818063..d97597d17d3 100644 --- a/dbms/tests/queries/0_stateless/00203_full_join.reference +++ b/dbms/tests/queries/0_stateless/00203_full_join.reference @@ -43,3 +43,5 @@ Hello [0,1,2] 5 6 7 ddd 2 3 4 bbb ccc 5 6 7 ddd +2 3 4 bbb ccc +5 6 7 ddd diff --git a/dbms/tests/queries/0_stateless/00203_full_join.sql b/dbms/tests/queries/0_stateless/00203_full_join.sql index 0250aef35eb..9b07e9c84e7 100644 --- a/dbms/tests/queries/0_stateless/00203_full_join.sql +++ 
b/dbms/tests/queries/0_stateless/00203_full_join.sql @@ -27,7 +27,7 @@ SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY RIGHT JOIN t2_00203 USING (k SET any_join_distinct_right_table_keys = 0; SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY FULL JOIN t2_00203 USING (k3, k1, k2 AS k2_alias) ORDER BY k1, k2, k3; -- { serverError 48 } -SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY RIGHT JOIN t2_00203 USING (k3, k1, k2 AS k2_alias) ORDER BY k1, k2, k3; -- { serverError 48 } +SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY RIGHT JOIN t2_00203 USING (k3, k1, k2 AS k2_alias) ORDER BY k1, k2, k3; DROP TABLE t1_00203; DROP TABLE t2_00203; diff --git a/dbms/tests/queries/0_stateless/00362_great_circle_distance.reference b/dbms/tests/queries/0_stateless/00362_great_circle_distance.reference index f3590f06943..f7b7549366e 100644 --- a/dbms/tests/queries/0_stateless/00362_great_circle_distance.reference +++ b/dbms/tests/queries/0_stateless/00362_great_circle_distance.reference @@ -1,3 +1,7 @@ -343417 -342558 0 +1 +1 +1 +1 +1 +1 diff --git a/dbms/tests/queries/0_stateless/00362_great_circle_distance.sql b/dbms/tests/queries/0_stateless/00362_great_circle_distance.sql index a0fa9bb1eae..62f9e83764d 100644 --- a/dbms/tests/queries/0_stateless/00362_great_circle_distance.sql +++ b/dbms/tests/queries/0_stateless/00362_great_circle_distance.sql @@ -1,6 +1,13 @@ -SELECT floor(greatCircleDistance(33.3, 55.3, 38.7, 55.1)) AS distance; -SELECT floor(greatCircleDistance(33.3 + v, 55.3 + v, 38.7 + v , 55.1 + v)) AS distance from -( - select number + 0.1 as v from system.numbers limit 1 -); SELECT floor(greatCircleDistance(33.3, 55.3, 33.3, 55.3)) AS distance; +-- consts are from vincenty formula from geopy +-- k = '158.756175, 53.006373' +-- u = '37.531014, 55.703050' +-- y = '37.588144, 55.733842' +-- m = '37.617780, 55.755830' +-- n = '83.089598, 54.842461' +select abs(greatCircleDistance(37.531014, 55.703050, 37.588144, 55.733842) - 4964.25740448) / 4964.25740448 < 0.004; +select abs(greatCircleDistance(37.531014, 55.703050, 37.617780, 55.755830) - 8015.52288508) / 8015.52288508 < 0.004; +select abs(greatCircleDistance(37.588144, 55.733842, 37.617780, 55.755830) - 3075.27332275) / 3075.27332275 < 0.004; +select abs(greatCircleDistance(83.089598, 54.842461, 37.617780, 55.755830) - 2837839.72863) / 2837839.72863 < 0.004; +select abs(greatCircleDistance(37.617780, 55.755830, 158.756175, 53.006373) - 6802821.68814) / 6802821.68814 < 0.004; +select abs(greatCircleDistance(83.089598, 54.842461, 158.756175, 53.006373) - 4727216.39539) / 4727216.39539 < 0.004; diff --git a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql b/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql index c22b72e4126..ce52c652df0 100644 --- a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql +++ b/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql @@ -20,7 +20,7 @@ FROM learnerHash, passed - eventTime AS diff FROM statements - GLOBAL ANY INNER JOIN + GLOBAL SEMI LEFT JOIN ( SELECT learnerHash, diff --git a/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql b/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql index 27ff799be62..19aa939b132 100644 --- a/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql +++ b/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql @@ -1,7 +1,6 @@ -set any_join_distinct_right_table_keys = 1; SET joined_subquery_requires_alias = 0; SELECT * FROM (SELECT 1 AS id, 2 
AS value); -SELECT * FROM (SELECT 1 AS id, 2 AS value, 3 AS A) ANY INNER JOIN (SELECT 1 AS id, 4 AS values, 5 AS D) USING id; -SELECT *, d.* FROM ( SELECT 1 AS id, 2 AS value ) ANY INNER JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; -SELECT *, d.*, d.values FROM ( SELECT 1 AS id, 2 AS value ) ANY INNER JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; +SELECT * FROM (SELECT 1 AS id, 2 AS value, 3 AS A) SEMI LEFT JOIN (SELECT 1 AS id, 4 AS values, 5 AS D) USING id; +SELECT *, d.* FROM ( SELECT 1 AS id, 2 AS value ) SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; +SELECT *, d.*, d.values FROM ( SELECT 1 AS id, 2 AS value ) SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; diff --git a/dbms/tests/queries/0_stateless/00700_decimal_array_functions.reference b/dbms/tests/queries/0_stateless/00700_decimal_array_functions.reference new file mode 100644 index 00000000000..969a8dd2f18 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00700_decimal_array_functions.reference @@ -0,0 +1,20 @@ +[0.0000,1.0000] Array(Decimal(9, 4)) +[0.00000000,1.00000000] Array(Decimal(18, 8)) +[0.00000000,1.00000000] Array(Decimal(38, 8)) +- +1.0000 Decimal(38, 4) +1.00000000 Decimal(38, 8) +1.00000000 Decimal(38, 8) +- +[1.0000,2.0000] Array(Decimal(38, 4)) +[1.00000000,2.00000000] Array(Decimal(38, 8)) +[1.00000000,2.00000000] Array(Decimal(38, 8)) +- +[1.0000,2.0000] Array(Decimal(38, 4)) +[1.00000000,2.00000000] Array(Decimal(38, 8)) +[1.00000000,2.00000000] Array(Decimal(38, 8)) +- +[1.0000] Array(Decimal(9, 4)) +[1.00000000] Array(Decimal(18, 8)) +[1.00000000] Array(Decimal(38, 8)) +- diff --git a/dbms/tests/queries/0_stateless/00700_decimal_array_functions.sql b/dbms/tests/queries/0_stateless/00700_decimal_array_functions.sql new file mode 100644 index 00000000000..c76c8728e15 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00700_decimal_array_functions.sql @@ -0,0 +1,20 @@ +SELECT arrayDifference([toDecimal32(0.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayDifference([toDecimal64(0.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayDifference([toDecimal128(0.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arraySum([toDecimal32(0.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arraySum([toDecimal64(0.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arraySum([toDecimal128(0.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arrayCumSum([toDecimal32(1.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayCumSum([toDecimal64(1.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayCumSum([toDecimal128(1.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arrayCumSumNonNegative([toDecimal32(1.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayCumSumNonNegative([toDecimal64(1.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayCumSumNonNegative([toDecimal128(1.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arrayCompact([toDecimal32(1.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayCompact([toDecimal64(1.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayCompact([toDecimal128(1.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; diff --git a/dbms/tests/queries/0_stateless/00700_decimal_gathers.reference b/dbms/tests/queries/0_stateless/00700_decimal_gathers.reference new file mode 100644 index 00000000000..bbfd7388e12 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00700_decimal_gathers.reference @@ -0,0 +1,13 @@ +[2.000] +[2.0000000000] 
+[2.000000000000000000] +[1.000] +[1.0000000000] +[1.000000000000000000] +- +[2.000] +[1] +[2.000000000000000000] +[1.000] +[2] +[1.000000000000000000] diff --git a/dbms/tests/queries/0_stateless/00700_decimal_gathers.sql b/dbms/tests/queries/0_stateless/00700_decimal_gathers.sql new file mode 100644 index 00000000000..98519577b62 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00700_decimal_gathers.sql @@ -0,0 +1,17 @@ +select if(1, [cast(materialize(2.0),'Decimal(9,3)')], [cast(materialize(1.0),'Decimal(9,3)')]); +select if(1, [cast(materialize(2.0),'Decimal(18,10)')], [cast(materialize(1.0),'Decimal(18,10)')]); +select if(1, [cast(materialize(2.0),'Decimal(38,18)')], [cast(materialize(1.0),'Decimal(38,18)')]); + +select if(0, [cast(materialize(2.0),'Decimal(9,3)')], [cast(materialize(1.0),'Decimal(9,3)')]); +select if(0, [cast(materialize(2.0),'Decimal(18,10)')], [cast(materialize(1.0),'Decimal(18,10)')]); +select if(0, [cast(materialize(2.0),'Decimal(38,18)')], [cast(materialize(1.0),'Decimal(38,18)')]); + +select '-'; + +select if(1, [cast(materialize(2.0),'Decimal(9,3)')], [cast(materialize(1.0),'Decimal(9,0)')]); +select if(0, [cast(materialize(2.0),'Decimal(18,10)')], [cast(materialize(1.0),'Decimal(18,0)')]); +select if(1, [cast(materialize(2.0),'Decimal(38,18)')], [cast(materialize(1.0),'Decimal(38,8)')]); + +select if(0, [cast(materialize(2.0),'Decimal(9,0)')], [cast(materialize(1.0),'Decimal(9,3)')]); +select if(1, [cast(materialize(2.0),'Decimal(18,0)')], [cast(materialize(1.0),'Decimal(18,10)')]); +select if(0, [cast(materialize(2.0),'Decimal(38,0)')], [cast(materialize(1.0),'Decimal(38,18)')]); diff --git a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql b/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql index abf2903d3ea..aa386829276 100644 --- a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql +++ b/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql @@ -12,8 +12,7 @@ SYSTEM SYNC REPLICA byte_identical_r2; ALTER TABLE byte_identical_r1 ADD COLUMN y DEFAULT rand(); OPTIMIZE TABLE byte_identical_r1 PARTITION tuple() FINAL; -SET any_join_distinct_right_table_keys = 1; -SELECT x, t1.y - t2.y FROM byte_identical_r1 t1 ANY INNER JOIN byte_identical_r2 t2 USING x ORDER BY x; +SELECT x, t1.y - t2.y FROM byte_identical_r1 t1 SEMI LEFT JOIN byte_identical_r2 t2 USING x ORDER BY x; DROP TABLE byte_identical_r1; DROP TABLE byte_identical_r2; diff --git a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference b/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference index 074ee47e294..afbe9855519 100644 --- a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference +++ b/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference @@ -6,3 +6,6 @@ 1 x x 1 x x 1 x x +1 x x +1 x x +1 x x diff --git a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql b/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql index cf1c0bfe1f7..14a7424e634 100644 --- a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql +++ b/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql @@ -12,5 +12,7 @@ SELECT * FROM (SELECT 1 AS a, 'x' AS b) any full join (SELECT 1 as a, 'y' as b) SELECT * FROM (SELECT 1 AS a, 'x' AS b) any right join (SELECT 1 as a, 'y' as b) using a; SET 
any_join_distinct_right_table_keys = 0; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) left join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any right join (SELECT 1 as a, 'y' as b) using a; SELECT * FROM (SELECT 1 AS a, 'x' AS b) any full join (SELECT 1 as a, 'y' as b) using a; -- { serverError 48 } -SELECT * FROM (SELECT 1 AS a, 'x' AS b) any right join (SELECT 1 as a, 'y' as b) using a; -- { serverError 48 } diff --git a/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql b/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql index 23c41549502..4fb6f4ec046 100644 --- a/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql +++ b/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql @@ -1,5 +1,3 @@ -set any_join_distinct_right_table_keys = 1; - drop table if exists fooL; drop table if exists fooR; create table fooL (a Int32, v String) engine = Memory; @@ -9,7 +7,7 @@ insert into fooL select number, 'L' || toString(number) from numbers(2); insert into fooL select number, 'LL' || toString(number) from numbers(2); insert into fooR select number, 'R' || toString(number) from numbers(2); -select distinct a from fooL any join fooR using(a) order by a; +select distinct a from fooL semi left join fooR using(a) order by a; drop table fooL; drop table fooR; diff --git a/dbms/tests/queries/0_stateless/00918_json_functions.reference b/dbms/tests/queries/0_stateless/00918_json_functions.reference index 181da3dd3a0..32cde7bbfb4 100644 --- a/dbms/tests/queries/0_stateless/00918_json_functions.reference +++ b/dbms/tests/queries/0_stateless/00918_json_functions.reference @@ -166,3 +166,12 @@ d e u v +--JSONExtractArrayRaw-- +[] +[] +[] +['[]','[]'] +['-100','200','300'] +['1','2','3','4','5','"hello"'] +['1','2','3'] +['4','5','6'] diff --git a/dbms/tests/queries/0_stateless/00918_json_functions.sql b/dbms/tests/queries/0_stateless/00918_json_functions.sql index 4cb2445ca2a..0db9540377e 100644 --- a/dbms/tests/queries/0_stateless/00918_json_functions.sql +++ b/dbms/tests/queries/0_stateless/00918_json_functions.sql @@ -182,3 +182,12 @@ SELECT JSONExtractRaw('{"abc":"\\u263a"}', 'abc'); SELECT '--const/non-const mixed--'; SELECT JSONExtractString('["a", "b", "c", "d", "e"]', idx) FROM (SELECT arrayJoin([1,2,3,4,5]) AS idx); SELECT JSONExtractString(json, 's') FROM (SELECT arrayJoin(['{"s":"u"}', '{"s":"v"}']) AS json); + +SELECT '--JSONExtractArrayRaw--'; +SELECT JSONExtractArrayRaw(''); +SELECT JSONExtractArrayRaw('{"a": "hello", "b": "not_array"}'); +SELECT JSONExtractArrayRaw('[]'); +SELECT JSONExtractArrayRaw('[[],[]]'); +SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractArrayRaw('[1,2,3,4,5,"hello"]'); +SELECT JSONExtractArrayRaw(arrayJoin(JSONExtractArrayRaw('[[1,2,3],[4,5,6]]'))); diff --git a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql b/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql index 244e04a564a..f70bccd68fd 100644 --- a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql +++ b/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql @@ -1,4 +1,3 @@ -SET any_join_distinct_right_table_keys = 1; SET join_use_nulls = 1; -SELECT number FROM system.numbers ANY INNER JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; +SELECT number FROM system.numbers SEMI LEFT JOIN (SELECT number, 
['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; SELECT number FROM system.numbers ANY LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql b/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql index 9a754d94323..7ecffd8653c 100644 --- a/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql +++ b/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql @@ -1,5 +1,5 @@ drop table if exists tab; create table tab (x UInt64) engine = MergeTree order by tuple(); -insert into tab select number as n from numbers(20) any inner join (select number * 10 as n from numbers(2)) using(n) settings any_join_distinct_right_table_keys = 1, max_block_size = 5; +insert into tab select number as n from numbers(20) semi left join (select number * 10 as n from numbers(2)) using(n) settings max_block_size = 5; select * from tab order by x; diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql b/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql index 90a902c352d..3fe7ec04e85 100644 --- a/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql +++ b/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql @@ -1,4 +1,3 @@ -Set any_join_distinct_right_table_keys=1; DROP TABLE IF EXISTS test_insert_t1; DROP TABLE IF EXISTS test_insert_t2; DROP TABLE IF EXISTS test_insert_t3; @@ -15,7 +14,7 @@ INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numb INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=700000 limit 200; INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=900000 limit 200; -INSERT INTO test_insert_t3 SELECT '2019-09-01', uid, name, city FROM ( SELECT dt, uid, name, city FROM test_insert_t1 WHERE dt = '2019-09-01') t1 GLOBAL ANY INNER JOIN (SELECT uid FROM test_insert_t2 WHERE dt = '2019-09-01') t2 ON t1.uid=t2.uid; +INSERT INTO test_insert_t3 SELECT '2019-09-01', uid, name, city FROM ( SELECT dt, uid, name, city FROM test_insert_t1 WHERE dt = '2019-09-01') t1 GLOBAL SEMI LEFT JOIN (SELECT uid FROM test_insert_t2 WHERE dt = '2019-09-01') t2 ON t1.uid=t2.uid; SELECT count(*) FROM test_insert_t3; diff --git a/dbms/tests/queries/0_stateless/01015_array_split.reference b/dbms/tests/queries/0_stateless/01015_array_split.reference index ea9d36a95b2..652e7ccc43c 100644 --- a/dbms/tests/queries/0_stateless/01015_array_split.reference +++ b/dbms/tests/queries/0_stateless/01015_array_split.reference @@ -6,8 +6,10 @@ [[1],[2],[3],[4],[5]] [[1,2],[3,4],[5]] [[1],[2,3],[4,5]] -[[]] -[[]] +[] +[] +[] +[] [] [] [[1]] diff --git a/dbms/tests/queries/0_stateless/01015_array_split.sql b/dbms/tests/queries/0_stateless/01015_array_split.sql index 64d456ed724..8ae96ba01e6 100644 --- a/dbms/tests/queries/0_stateless/01015_array_split.sql +++ b/dbms/tests/queries/0_stateless/01015_array_split.sql @@ -12,6 +12,8 @@ SELECT arraySplit(x -> 0, []); SELECT arrayReverseSplit(x -> 0, []); SELECT arraySplit(x -> 1, []); SELECT arrayReverseSplit(x -> 1, []); +SELECT arraySplit(x -> x, emptyArrayUInt8()); +SELECT arrayReverseSplit(x -> x, emptyArrayUInt8()); SELECT arraySplit(x -> x % 2 = 1, [1]); SELECT arrayReverseSplit(x -> x % 2 = 1, [1]); diff --git a/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference 
b/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference new file mode 100644 index 00000000000..b12baf4709e --- /dev/null +++ b/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference @@ -0,0 +1,80 @@ +-- SummingMergeTree with Nullable column without duplicates. +2018-02-01 00:00:00 1 +2018-02-02 00:00:00 2 +-- 2 2 +2 +2 +-- 2 2 +2 +2 +-- 2 2 +2 +2 +-- 2 2 2 2 +2 +2 +2 +2 +-- 2 2 +2 +2 +-- SummingMergeTree with Nullable column with duplicates +2018-02-01 00:00:00 4 +2018-02-02 00:00:00 6 +-- 4 2 +4 +2 +-- 4 2 +4 +2 +-- 4 2 +4 +2 +-- 2 2 2 2 +2 +2 +2 +2 +-- 2 2 +2 +2 +-- SummingMergeTree without Nullable column without duplicates. +2018-02-01 00:00:00 1 +2018-02-02 00:00:00 2 +-- 2 2 +2 +2 +-- 2 2 +2 +2 +-- 2 2 +2 +2 +-- 2 2 2 2 +2 +2 +2 +2 +-- 2 2 +2 +2 +-- SummingMergeTree without Nullable column with duplicates. +2018-02-01 00:00:00 4 +2018-02-02 00:00:00 6 +-- 4 2 +4 +2 +-- 4 2 +4 +2 +-- 4 2 +4 +2 +-- 2 2 2 2 +2 +2 +2 +2 +-- 2 2 +2 +2 diff --git a/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql b/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql new file mode 100644 index 00000000000..a9f7bf7ecd7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql @@ -0,0 +1,123 @@ +select '-- SummingMergeTree with Nullable column without duplicates.'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Nullable(Int8)) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2); + +select * from tst final; + +select '-- 2 2'; +select count() from tst; +select count() from tst final; + +select '-- 2 2'; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 2 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + +select '-- SummingMergeTree with Nullable column with duplicates'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Nullable(Int8)) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2), ('2018-02-01 00:00:00', 3), ('2018-02-02 00:00:00', 4); + +select * from tst final; + +select '-- 4 2'; +select count() from tst; +select count() from tst final; + +select '-- 4 2'; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 4 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + +select '-- 
SummingMergeTree without Nullable column without duplicates.'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Int8) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2); + +select * from tst final; + +select '-- 2 2'; +select count() from tst; +select count() from tst final; + +select '-- 2 2 '; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 2 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + +drop table tst; + +select '-- SummingMergeTree without Nullable column with duplicates.'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Int8) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2), ('2018-02-01 00:00:00', 3), ('2018-02-02 00:00:00', 4); + +select * from tst final; + +select '-- 4 2'; +select count() from tst; +select count() from tst final; + +select '-- 4 2'; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 4 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + +drop table tst; diff --git a/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference b/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference new file mode 100644 index 00000000000..0b2175ee23d --- /dev/null +++ b/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference @@ -0,0 +1,14 @@ +OK +OK +upyachka a +test b +foo c +bar d +hello x +world x +upyachka a +test b +foo c +bar d +hello x +world x diff --git a/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh b/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh new file mode 100755 index 00000000000..438a1fdd258 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +CLICKHOUSE_CLIENT=`echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g'` + +$CLICKHOUSE_CLIENT --query="SELECT * FROM (SELECT number % 5 AS a, count() AS b, c FROM numbers(10) ARRAY JOIN [1,2] AS c GROUP BY a,c) AS table ORDER BY a LIMIT 3 WITH TIES BY a" 2>&1 | grep -q "Code: 498." 
&& echo 'OK' || echo 'FAIL' ||: + +$CLICKHOUSE_CLIENT --query="SELECT * FROM VALUES('Phrase String, Payload String', ('hello', 'x'), ('world', 'x'), ('hello', 'z'), ('upyachka', 'a'), ('test', 'b'), ('foo', 'c'), ('bar', 'd')) ORDER BY Payload LIMIT 1 WITH TIES BY Phrase LIMIT 5;" 2>&1 | grep -q "Code: 498." && echo 'OK' || echo 'FAIL' ||: + +$CLICKHOUSE_CLIENT --query="SELECT * FROM VALUES('Phrase String, Payload String', ('hello', 'x'), ('world', 'x'), ('hello', 'z'), ('upyachka', 'a'), ('test', 'b'), ('foo', 'c'), ('bar', 'd')) ORDER BY Payload LIMIT 1 BY Phrase LIMIT 5 WITH TIES" + +$CLICKHOUSE_CLIENT --query="SELECT TOP 5 WITH TIES * FROM VALUES('Phrase String, Payload String', ('hello', 'x'), ('world', 'x'), ('hello', 'z'), ('upyachka', 'a'), ('test', 'b'), ('foo', 'c'), ('bar', 'd')) ORDER BY Payload LIMIT 1 BY Phrase" diff --git a/dbms/tests/queries/0_stateless/01030_storage_s3_syntax.sql b/dbms/tests/queries/0_stateless/01030_storage_s3_syntax.sql index 6579984f57d..44cd149dd51 100644 --- a/dbms/tests/queries/0_stateless/01030_storage_s3_syntax.sql +++ b/dbms/tests/queries/0_stateless/01030_storage_s3_syntax.sql @@ -2,7 +2,7 @@ drop table if exists test_table_s3_syntax ; create table test_table_s3_syntax (id UInt32) ENGINE = S3('') ; -- { serverError 42 } -create table test_table_s3_syntax (id UInt32) ENGINE = S3('','','','') +create table test_table_s3_syntax (id UInt32) ENGINE = S3('','','','','','') ; -- { serverError 42 } drop table if exists test_table_s3_syntax ; diff --git a/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.reference b/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.reference new file mode 100644 index 00000000000..3c6d3acf6f4 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.reference @@ -0,0 +1,11 @@ +1 +2 +3 +1 +2 +3 +4 +5 +1 +2 +3 diff --git a/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.sql b/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.sql new file mode 100644 index 00000000000..8b406cae769 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS userid_test; + +SET use_index_for_in_with_subqueries = 1; + +CREATE TABLE userid_test (userid UInt64) ENGINE = MergeTree() PARTITION BY (intDiv(userid, 500)) ORDER BY (userid) SETTINGS index_granularity = 8192; + +INSERT INTO userid_test VALUES (1),(2),(3),(4),(5); + +DROP TABLE IF EXISTS userid_set; + +CREATE TABLE userid_set(userid UInt64) ENGINE = Set; + +INSERT INTO userid_set VALUES (1),(2),(3); + +SELECT * FROM userid_test WHERE userid IN (1, 2, 3); + +SELECT * FROM userid_test WHERE toUInt64(1) IN (userid_set); + +SELECT * FROM userid_test WHERE userid IN (userid_set); + +DROP TABLE userid_test; diff --git a/dbms/tests/queries/0_stateless/01031_new_any_join.reference b/dbms/tests/queries/0_stateless/01031_new_any_join.reference new file mode 100644 index 00000000000..1fd9a5352e3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01031_new_any_join.reference @@ -0,0 +1,32 @@ +any left +0 a1 0 +1 a2 0 +2 a3 2 b1 +3 a4 0 +4 a5 4 b3 +any left (rev) +0 5 b6 +2 a3 2 b1 +2 a3 2 b2 +4 a5 4 b3 +4 a5 4 b4 +4 a5 4 b5 +any inner +2 a3 2 b1 +4 a5 4 b3 +any inner (rev) +2 a3 2 b1 +4 a5 4 b3 +any right +0 5 b6 +2 a3 2 b1 +2 a3 2 b2 +4 a5 4 b3 +4 a5 4 b4 +4 a5 4 b5 +any right (rev) +0 a1 0 +1 a2 0 +2 a3 2 b1 +3 a4 0 +4 a5 4 b3 diff --git a/dbms/tests/queries/0_stateless/01031_new_any_join.sql b/dbms/tests/queries/0_stateless/01031_new_any_join.sql new 
file mode 100644 index 00000000000..de86d8eebc5 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01031_new_any_join.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x UInt32, s String) engine = Memory; +CREATE TABLE t2 (x UInt32, s String) engine = Memory; + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; +SET any_join_distinct_right_table_keys = 0; + +SELECT 'any left'; +SELECT t1.*, t2.* FROM t1 ANY LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any left (rev)'; +SELECT t1.*, t2.* FROM t2 ANY LEFT JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any inner'; +SELECT t1.*, t2.* FROM t1 ANY INNER JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any inner (rev)'; +SELECT t1.*, t2.* FROM t2 ANY INNER JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any right'; +SELECT t1.*, t2.* FROM t1 ANY RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any right (rev)'; +SELECT t1.*, t2.* FROM t2 ANY RIGHT JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/dbms/tests/queries/0_stateless/01031_semi_anti_join.reference b/dbms/tests/queries/0_stateless/01031_semi_anti_join.reference new file mode 100644 index 00000000000..782147f1f6f --- /dev/null +++ b/dbms/tests/queries/0_stateless/01031_semi_anti_join.reference @@ -0,0 +1,16 @@ +semi left +2 a3 2 b1 +2 a6 2 b1 +4 a5 4 b3 +semi right +2 a3 2 b1 +2 a3 2 b2 +4 a5 4 b3 +4 a5 4 b4 +4 a5 4 b5 +anti left +0 a1 0 +1 a2 1 +3 a4 3 +anti right +0 5 b6 diff --git a/dbms/tests/queries/0_stateless/01031_semi_anti_join.sql b/dbms/tests/queries/0_stateless/01031_semi_anti_join.sql new file mode 100644 index 00000000000..19ea219563a --- /dev/null +++ b/dbms/tests/queries/0_stateless/01031_semi_anti_join.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x UInt32, s String) engine = Memory; +CREATE TABLE t2 (x UInt32, s String) engine = Memory; + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'), (2, 'a6'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; + +SELECT 'semi left'; +SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'semi right'; +SELECT t1.*, t2.* FROM t1 SEMI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'anti left'; +SELECT t1.*, t2.* FROM t1 ANTI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'anti right'; +SELECT t1.*, t2.* FROM t1 ANTI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference b/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference new file mode 100644 index 00000000000..e592bdd2331 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference @@ -0,0 +1,2 @@ +17345439983272262203 +1 diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql b/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql new file mode 100644 index 00000000000..76a2389b833 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql @@ -0,0 +1,9 @@ +SELECT cityHash64(toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')) AS uuid; +DROP TABLE IF EXISTS t_uuid; +CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog; +INSERT INTO t_uuid SELECT generateUUIDv4(); 
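+-- (Editor's note on the 01031 join tests above, not part of the original
+-- patch: throughout this diff, `ANY INNER JOIN` together with
+-- any_join_distinct_right_table_keys = 1 is replaced by `SEMI LEFT JOIN`.
+-- Per the reference output above, SEMI LEFT JOIN keeps every left-side row
+-- that has at least one match and attaches a single matching right-side row,
+-- while ANTI LEFT JOIN is the complement: only left rows without any match
+-- survive. A hedged sketch against the t1/t2 tables from those tests:
+--     SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING (x) ORDER BY t1.x, t2.x;)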
+INSERT INTO t_uuid SELECT generateUUIDv4(); +INSERT INTO t_uuid SELECT generateUUIDv4(); +INSERT INTO t_uuid SELECT generateUUIDv4(); +SELECT (SELECT count() FROM t_uuid WHERE cityHash64(reinterpretAsString(x)) = cityHash64(x) and length(reinterpretAsString(x)) = 16) = (SELECT count() AS c2 FROM t_uuid WHERE length(reinterpretAsString(x)) = 16); +DROP TABLE IF EXISTS t_uuid; diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference b/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference new file mode 100644 index 00000000000..afc31c90c7e --- /dev/null +++ b/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference @@ -0,0 +1,9 @@ +7948708622144234705 +8929217225161595187 +5200826623470796493 +16603170516127487987 +10474166755000741993 +13955506427180385733 +15701171052063596312 +12183501908379561231 +5781832733050385252 diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql b/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql new file mode 100644 index 00000000000..3b596dd5398 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql @@ -0,0 +1,6 @@ +SELECT cityHash64(toDecimal32(32, 2)); +SELECT cityHash64(toDecimal64(64, 5)); +SELECT cityHash64(toDecimal128(128, 24)); +SELECT cityHash64(toDecimal32(number, 3)) from numbers(198, 2); +SELECT cityHash64(toDecimal64(number, 9)) from numbers(297, 2); +SELECT cityHash64(toDecimal128(number, 16)) from numbers(123, 2); diff --git a/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.reference b/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.reference new file mode 100644 index 00000000000..07c56f08482 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.reference @@ -0,0 +1,3 @@ +INITIALIZING DICTIONARY +1 +1 10 diff --git a/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.sql b/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.sql new file mode 100644 index 00000000000..8b16c401afe --- /dev/null +++ b/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.sql @@ -0,0 +1,50 @@ +SET send_logs_level = 'none'; + +DROP DATABASE IF EXISTS database_for_dict; + +CREATE DATABASE database_for_dict Engine = Ordinary; + +DROP TABLE IF EXISTS database_for_dict.table_for_dict; + +CREATE TABLE database_for_dict.table_for_dict +( + key_column UInt64, + second_column UInt8, + third_column String +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO database_for_dict.table_for_dict VALUES (1, 100, 'Hello world'); + +DROP DATABASE IF EXISTS ordinary_db; + +CREATE DATABASE ordinary_db ENGINE = Ordinary; + +DROP DICTIONARY IF EXISTS ordinary_db.dict1; + +CREATE DICTIONARY ordinary_db.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SELECT 'INITIALIZING DICTIONARY'; + +SELECT dictGetUInt8('ordinary_db.dict1', 'second_column', toUInt64(100500)); + +SELECT lifetime_min, lifetime_max FROM system.dictionaries WHERE name = 'dict1'; + +DROP DICTIONARY IF EXISTS ordinary_db.dict1; + +DROP DATABASE IF EXISTS ordinary_db; + +DROP TABLE IF EXISTS database_for_dict.table_for_dict; + +DROP DATABASE IF EXISTS database_for_dict; + diff --git a/dbms/tests/queries/0_stateless/01033_quota_dcl.reference 
b/dbms/tests/queries/0_stateless/01033_quota_dcl.reference new file mode 100644 index 00000000000..7f92f992dd5 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01033_quota_dcl.reference @@ -0,0 +1,2 @@ +default +CREATE QUOTA default KEYED BY \'user name\' FOR INTERVAL 1 HOUR TRACKING TO default, readonly diff --git a/dbms/tests/queries/0_stateless/01033_quota_dcl.sql b/dbms/tests/queries/0_stateless/01033_quota_dcl.sql new file mode 100644 index 00000000000..a1c7f1fc204 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01033_quota_dcl.sql @@ -0,0 +1,3 @@ +SHOW QUOTAS; +SHOW CREATE QUOTA default; +CREATE QUOTA q1; -- { serverError 497 } diff --git a/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.reference b/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.reference new file mode 100644 index 00000000000..6ec53e11fc9 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.reference @@ -0,0 +1,47 @@ +1 +[1, "a"] +[2, "b"] +[3, "c"] +2 +["a", "1"] +["b", "1"] +["c", "1"] +3 +["value", "name"] +["UInt8", "String"] +[1, "a"] +[2, "b"] +[3, "c"] +4 +["name", "c"] +["String", "UInt64"] +["a", "1"] +["b", "1"] +["c", "1"] + +["", "3"] +5 +["first", 1, 2, 0] +["second", 2, 0, 6] +6 +["first", 1, 2, 8] +["second", 2, 32, 6] +7 +[16, [15,16,0], ["first","second","third"]] +8 +["first", 1, 2, 0] +["second", 2, 0, 6] +9 +["first", 1, 2, 8] +["second", 2, 32, 6] +10 +["first", 1, 16, 8] +["second", 2, 32, 8] +11 +["v1", "v2", "v3", "v4"] +["String", "UInt8", "UInt16", "UInt8"] +["", 2, 3, 1] +12 +["v1", "n.id", "n.name"] +["UInt8", "Array(UInt8)", "Array(String)"] +[16, [15,16,0], ["first","second","third"]] diff --git a/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.sql b/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.sql new file mode 100644 index 00000000000..46a0e90e69d --- /dev/null +++ b/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.sql @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table_2; +SELECT 1; +/* Check JSONCompactEachRow Output */ +CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value; +INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM test_table FORMAT JSONCompactEachRow; +SELECT 2; +/* Check Totals */ +SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactEachRow; +SELECT 3; +/* Check JSONCompactEachRowWithNamesAndTypes Output */ +SELECT * FROM test_table FORMAT JSONCompactEachRowWithNamesAndTypes; +SELECT 4; +/* Check Totals */ +SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactEachRowWithNamesAndTypes; +DROP TABLE IF EXISTS test_table; +SELECT 5; +/* Check JSONCompactEachRow Input */ +CREATE TABLE test_table (v1 String, v2 UInt8, v3 DEFAULT v2 * 16, v4 UInt8 DEFAULT 8) ENGINE = MergeTree() ORDER BY v2; +INSERT INTO test_table FORMAT JSONCompactEachRow ["first", 1, "2", null] ["second", 2, null, 6]; +SELECT * FROM test_table FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table; +SELECT 6; +/* Check input_format_null_as_default = 1 */ +SET input_format_null_as_default = 1; +INSERT INTO test_table FORMAT JSONCompactEachRow ["first", 1, "2", null] ["second", 2, null, 6]; +SELECT * FROM test_table FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table; +SELECT 7; +/* Check Nested */ +CREATE TABLE test_table_2 (v1 UInt8, n Nested(id UInt8, name String)) ENGINE = MergeTree() ORDER BY v1; +INSERT INTO test_table_2 FORMAT JSONCompactEachRow [16, 
[15, 16, null], ["first", "second", "third"]]; +SELECT * FROM test_table_2 FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table_2; +SELECT 8; +/* Check JSONCompactEachRowWithNamesAndTypes Output */ +SET input_format_null_as_default = 0; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", 1, "2", null]["second", 2, null, 6]; +SELECT * FROM test_table FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table; +SELECT 9; +/* Check input_format_null_as_default = 1 */ +SET input_format_null_as_default = 1; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", 1, "2", null] ["second", 2, null, 6]; +SELECT * FROM test_table FORMAT JSONCompactEachRow; +SELECT 10; +/* Check Header */ +TRUNCATE TABLE test_table; +SET input_format_skip_unknown_fields = 1; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "invalid_column"]["String", "UInt8", "UInt8"]["first", 1, 32]["second", 2, "64"]; +SELECT * FROM test_table FORMAT JSONCompactEachRow; +SELECT 11; +TRUNCATE TABLE test_table; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v4", "v2", "v3"]["UInt8", "UInt8", "UInt16"][1, 2, 3] +SELECT * FROM test_table FORMAT JSONCompactEachRowWithNamesAndTypes; +SELECT 12; +/* Check Nested */ +INSERT INTO test_table_2 FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "n.id", "n.name"]["UInt8", "Array(UInt8)", "Array(String)"][16, [15, 16, null], ["first", "second", "third"]]; +SELECT * FROM test_table_2 FORMAT JSONCompactEachRowWithNamesAndTypes; + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table_2; diff --git a/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.reference b/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.reference new file mode 100644 index 00000000000..1ca0ea26354 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.reference @@ -0,0 +1,3 @@ +Waiting for mutation to finish +still alive +100 diff --git a/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.sh b/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.sh new file mode 100755 index 00000000000..21f029f27f1 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh +. 
$CURDIR/mergetree_mutations.lib + +# This test fails on versions <= 19.11.12. + +${CLICKHOUSE_CLIENT} --multiquery --query=" + DROP TABLE IF EXISTS lc_empty_part_bug; + create table lc_empty_part_bug (id UInt64, s String) Engine=MergeTree ORDER BY id; + insert into lc_empty_part_bug select number as id, toString(rand()) from numbers(100); + alter table lc_empty_part_bug delete where id < 100; +" + +wait_for_mutation 'lc_empty_part_bug' 'mutation_2.txt' + +echo 'Waiting for mutation to finish' + +${CLICKHOUSE_CLIENT} --multiquery --query=" + alter table lc_empty_part_bug modify column s LowCardinality(String); + SELECT 'still alive'; + insert into lc_empty_part_bug select number+100 as id, toString(rand()) from numbers(100); + SELECT count() FROM lc_empty_part_bug WHERE not ignore(*); + DROP TABLE lc_empty_part_bug; +" diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference new file mode 100644 index 00000000000..c4000b670ee --- /dev/null +++ b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference @@ -0,0 +1,10 @@ +0 +10 +1 +SYSTEM RELOAD DICTIONARY +0 +0 +10 +1 +CREATE DATABASE +1 diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql new file mode 100644 index 00000000000..785e8b4b25a --- /dev/null +++ b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql @@ -0,0 +1,29 @@ +DROP DATABASE IF EXISTS dict_db_01036; +CREATE DATABASE dict_db_01036; + +CREATE TABLE dict_db_01036.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict_db_01036.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01036')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_01036' AND name = 'dict'; +SELECT dictGetUInt64('dict_db_01036.dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_01036' AND name = 'dict'; + +SELECT 'SYSTEM RELOAD DICTIONARY'; +SYSTEM RELOAD DICTIONARY 'dict_db_01036.dict'; +SELECT sleep(0.3); +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_01036' AND name = 'dict'; +SELECT dictGetUInt64('dict_db_01036.dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_01036' AND name = 'dict'; + +SELECT 'CREATE DATABASE'; +DROP DATABASE IF EXISTS empty_db_01036; +CREATE DATABASE empty_db_01036; +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_01036' AND name = 'dict'; diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference new file mode 100644 index 00000000000..c4000b670ee --- /dev/null +++ b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference @@ -0,0 +1,10 @@ +0 +10 +1 +SYSTEM RELOAD DICTIONARY +0 +0 +10 +1 +CREATE DATABASE +1 diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql new file
mode 100644 index 00000000000..46ebdcbd7b3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql @@ -0,0 +1,29 @@ +DROP DATABASE IF EXISTS `foo 1234`; +CREATE DATABASE `foo 1234`; + +CREATE TABLE `foo 1234`.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY `foo 1234`.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'foo 1234')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; +SELECT dictGetUInt64('foo 1234.dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; + +SELECT 'SYSTEM RELOAD DICTIONARY'; +SYSTEM RELOAD DICTIONARY 'foo 1234.dict'; +SELECT sleep(0.3); +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; +SELECT dictGetUInt64('foo 1234.dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; + +SELECT 'CREATE DATABASE'; +DROP DATABASE IF EXISTS `foo 123`; +CREATE DATABASE `foo 123`; +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; diff --git a/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference b/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference new file mode 100644 index 00000000000..c47539e2301 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference @@ -0,0 +1,2 @@ +all_1_1_0 1 +all_0_0_0 1 diff --git a/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql b/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql new file mode 100644 index 00000000000..1d195b0388f --- /dev/null +++ b/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql @@ -0,0 +1,22 @@ +SET check_query_single_value_result = 0; +SET send_logs_level = 'none'; + +DROP TABLE IF EXISTS mt_without_pk; + +CREATE TABLE mt_without_pk (SomeField1 Int64, SomeField2 Double) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO mt_without_pk VALUES (1, 2); + +CHECK TABLE mt_without_pk; + +DROP TABLE IF EXISTS mt_without_pk; + +DROP TABLE IF EXISTS replicated_mt_without_pk; + +CREATE TABLE replicated_mt_without_pk (SomeField1 Int64, SomeField2 Double) ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_without_pk', '1') ORDER BY tuple(); + +INSERT INTO replicated_mt_without_pk VALUES (1, 2); + +CHECK TABLE replicated_mt_without_pk; + +DROP TABLE IF EXISTS replicated_mt_without_pk; diff --git a/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference b/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference new file mode 100644 index 00000000000..a2fff10e1ab --- /dev/null +++ b/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference @@ -0,0 +1,4 @@ +1.1 +77.77 +1.1 +2.2 diff --git a/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh b/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh new file mode 100755 index 00000000000..d3153be5e68 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS database_for_dict" + +$CLICKHOUSE_CLIENT --query "CREATE DATABASE database_for_dict Engine = Ordinary" + + +$CLICKHOUSE_CLIENT --query " +CREATE TABLE database_for_dict.table_for_dict +( + key_column UInt64, + value Float64 +) +ENGINE = MergeTree() +ORDER BY key_column" + +$CLICKHOUSE_CLIENT --query "INSERT INTO database_for_dict.table_for_dict VALUES (1, 1.1)" + +$CLICKHOUSE_CLIENT --query " +CREATE DICTIONARY database_for_dict.dict_with_zero_min_lifetime +( + key_column UInt64, + value Float64 DEFAULT 77.77 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'database_for_dict')) +LIFETIME(1) +LAYOUT(FLAT())" + +$CLICKHOUSE_CLIENT --query "SELECT dictGetFloat64('database_for_dict.dict_with_zero_min_lifetime', 'value', toUInt64(1))" + +$CLICKHOUSE_CLIENT --query "SELECT dictGetFloat64('database_for_dict.dict_with_zero_min_lifetime', 'value', toUInt64(2))" + +$CLICKHOUSE_CLIENT --query "INSERT INTO database_for_dict.table_for_dict VALUES (2, 2.2)" + + +function check() +{ + + query_result=`$CLICKHOUSE_CLIENT --query "SELECT dictGetFloat64('database_for_dict.dict_with_zero_min_lifetime', 'value', toUInt64(2))"` + + # quote the variable so the test does not break if the query returns an empty string + while [ "$query_result" != "2.2" ] + do + query_result=`$CLICKHOUSE_CLIENT --query "SELECT dictGetFloat64('database_for_dict.dict_with_zero_min_lifetime', 'value', toUInt64(2))"` + done +} + + +export -f check; + +timeout 10 bash -c check + +$CLICKHOUSE_CLIENT --query "SELECT dictGetFloat64('database_for_dict.dict_with_zero_min_lifetime', 'value', toUInt64(1))" + +$CLICKHOUSE_CLIENT --query "SELECT dictGetFloat64('database_for_dict.dict_with_zero_min_lifetime', 'value', toUInt64(2))" + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS database_for_dict" diff --git a/dbms/tests/queries/0_stateless/01039_test_setting_parse.reference b/dbms/tests/queries/0_stateless/01039_test_setting_parse.reference new file mode 100644 index 00000000000..30237035c2c --- /dev/null +++ b/dbms/tests/queries/0_stateless/01039_test_setting_parse.reference @@ -0,0 +1,2 @@ +10000000001 +10000000001 diff --git a/dbms/tests/queries/0_stateless/01039_test_setting_parse.sql b/dbms/tests/queries/0_stateless/01039_test_setting_parse.sql new file mode 100644 index 00000000000..494e43b001f --- /dev/null +++ b/dbms/tests/queries/0_stateless/01039_test_setting_parse.sql @@ -0,0 +1,7 @@ +SET max_memory_usage = 10000000001; + +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; + +SET max_memory_usage = '1G'; -- { serverError 27 } + +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; diff --git a/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_failover.reference b/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_failover.reference new file mode 100644 index 00000000000..1fca8dab675 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_failover.reference @@ -0,0 +1,5 @@ +122 + +Table dictdb.dict_invalidate doesn\'t exist. + +133 diff --git a/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_failover.sh b/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_failover.sh new file mode 100755 index 00000000000..ef5d3053f9a --- /dev/null +++ b/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_failover.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +.
$CURDIR/../shell_config.sh + + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS dictdb" + +$CLICKHOUSE_CLIENT --query "CREATE DATABASE dictdb Engine = Ordinary" + +$CLICKHOUSE_CLIENT --query " +CREATE TABLE dictdb.dict_invalidate +ENGINE = Memory AS +SELECT + 122 as dummy, + toDateTime('2019-10-29 18:51:35') AS last_time +FROM system.one" + + +$CLICKHOUSE_CLIENT --query " +CREATE DICTIONARY dictdb.invalidate +( + dummy UInt64, + two UInt8 EXPRESSION dummy +) +PRIMARY KEY dummy +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_invalidate' DB 'dictdb' INVALIDATE_QUERY 'select max(last_time) from dictdb.dict_invalidate')) +LIFETIME(MIN 0 MAX 1) +LAYOUT(FLAT())" + +$CLICKHOUSE_CLIENT --query "SELECT dictGetUInt8('dictdb.invalidate', 'two', toUInt64(122))" + +$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb' AND name = 'invalidate'" + +# Not an ideal solution, but it is quite complicated to detect that invalidate_query has stopped updating. +# In the worst case we don't check anything, but fortunately it doesn't lead to false negatives. +sleep 5 + +$CLICKHOUSE_CLIENT --query "DROP TABLE dictdb.dict_invalidate" + +function check_exception_detected() +{ + + query_result=`$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb' AND name = 'invalidate'" 2>&1` + + while [ -z "$query_result" ] + do + query_result=`$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb' AND name = 'invalidate'" 2>&1` + sleep 0.1 + done +} + + +export -f check_exception_detected; +timeout 10 bash -c check_exception_detected 2> /dev/null + +$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb' AND name = 'invalidate'" 2>&1 | grep -Eo "Table dictdb.dict_invalidate .* exist."
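+
+# Recreate the source table; after the next refresh the dictionary is expected to recover
+# and last_exception should become empty again.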
+ +$CLICKHOUSE_CLIENT --query " +CREATE TABLE dictdb.dict_invalidate +ENGINE = Memory AS +SELECT + 133 as dummy, + toDateTime('2019-10-29 18:51:35') AS last_time +FROM system.one" + +function check_exception_fixed() +{ + query_result=`$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb' AND name = 'invalidate'" 2>&1` + + while [ "$query_result" ] + do + query_result=`$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb' AND name = 'invalidate'" 2>&1` + sleep 0.1 + done +} + +export -f check_exception_fixed; +timeout 10 bash -c check_exception_fixed 2> /dev/null + +$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb' AND name = 'invalidate'" 2>&1 +$CLICKHOUSE_CLIENT --query "SELECT dictGetUInt8('dictdb.invalidate', 'two', toUInt64(133))" + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS dictdb" diff --git a/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference b/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference new file mode 100644 index 00000000000..5565ed6787f --- /dev/null +++ b/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference @@ -0,0 +1,4 @@ +0 +1 +0 +1 diff --git a/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql b/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql new file mode 100644 index 00000000000..ffc33ce6949 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql @@ -0,0 +1,9 @@ +SET distributed_directory_monitor_batch_inserts=1; +SET distributed_directory_monitor_sleep_time_ms=10; +SET distributed_directory_monitor_max_sleep_time_ms=100; + +CREATE TABLE test (key UInt64) ENGINE=TinyLog(); +CREATE TABLE dist_test AS test Engine=Distributed(test_cluster_two_shards, currentDatabase(), test, key); +INSERT INTO dist_test SELECT toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test; +SELECT * FROM dist_test; diff --git a/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference b/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference new file mode 100644 index 00000000000..15eecd22cf1 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference @@ -0,0 +1,2 @@ +1.1 +1.1 diff --git a/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql b/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql new file mode 100644 index 00000000000..5002b7a59ab --- /dev/null +++ b/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql @@ -0,0 +1,40 @@ +DROP DATABASE IF EXISTS dictdb; + +CREATE DATABASE dictdb ENGINE = Ordinary; + +CREATE TABLE dictdb.table_for_dict +( + key_column UInt64, + value Float64 +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO dictdb.table_for_dict VALUES (1, 1.1); + +CREATE DICTIONARY IF NOT EXISTS dictdb.dict_exists +( + key_column UInt64, + value Float64 DEFAULT 77.77 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'dictdb')) +LIFETIME(1) +LAYOUT(FLAT()); + +SELECT dictGetFloat64('dictdb.dict_exists', 'value', toUInt64(1)); + + +CREATE DICTIONARY IF NOT EXISTS dictdb.dict_exists +( + key_column UInt64, + value Float64 DEFAULT 77.77 +) +PRIMARY KEY key_column 
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'dictdb')) +LIFETIME(1) +LAYOUT(FLAT()); + +SELECT dictGetFloat64('dictdb.dict_exists', 'value', toUInt64(1)); + +DROP DATABASE IF EXISTS dictdb; diff --git a/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference b/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference new file mode 100644 index 00000000000..a4fac158712 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference @@ -0,0 +1,7 @@ +all_1_1_0 1 +all_1_1_0 1 +all_1_1_0 1 +all_1_1_0 1 +all_1_1_0 1 +all_2_2_0 1 +all_1_2_1 1 diff --git a/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql b/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql new file mode 100644 index 00000000000..9777ea1dc45 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql @@ -0,0 +1,38 @@ +SET check_query_single_value_result = 0; +DROP TABLE IF EXISTS check_query_test; + +CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey; + +-- The number of rows in the last granule should be equal to the granularity. +-- Rows in this table are short, so the granularity will be 8192. +INSERT INTO check_query_test SELECT number, toString(number) FROM system.numbers LIMIT 81920; + +CHECK TABLE check_query_test; + +OPTIMIZE TABLE check_query_test; + +CHECK TABLE check_query_test; + +DROP TABLE IF EXISTS check_query_test; + +DROP TABLE IF EXISTS check_query_test_non_adaptive; + +CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0; + +INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 81920; + +CHECK TABLE check_query_test_non_adaptive; + +OPTIMIZE TABLE check_query_test_non_adaptive; + +CHECK TABLE check_query_test_non_adaptive; + +INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 77; + +CHECK TABLE check_query_test_non_adaptive; + +OPTIMIZE TABLE check_query_test_non_adaptive; + +CHECK TABLE check_query_test_non_adaptive; + +DROP TABLE IF EXISTS check_query_test_non_adaptive; diff --git a/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference b/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference new file mode 100644 index 00000000000..f12dcd8258a --- /dev/null +++ b/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference @@ -0,0 +1,6 @@ +12 -> 102 +13 -> 103 +14 -> -1 +12(r) -> 102 +13(r) -> 103 +14(r) -> 104 diff --git a/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh b/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh new file mode 100755 index 00000000000..3181e46f205 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +set -e -o pipefail + +# Run the client.
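+# The dictionary below uses UPDATE_FIELD 'insert_time', so periodic refreshes fetch only rows with
+# a newer insert_time; the SYSTEM RELOAD DICTIONARY query further down forces a full reload instead.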
+$CLICKHOUSE_CLIENT --multiquery <<'EOF' +DROP DATABASE IF EXISTS dictdb; +CREATE DATABASE dictdb Engine = Ordinary; +CREATE TABLE dictdb.table(x Int64, y Int64, insert_time DateTime) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO dictdb.table VALUES (12, 102, now()); + +CREATE DICTIONARY dictdb.dict +( + x Int64 DEFAULT -1, + y Int64 DEFAULT -1, + insert_time DateTime +) +PRIMARY KEY x +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table' DB 'dictdb' UPDATE_FIELD 'insert_time')) +LAYOUT(FLAT()) +LIFETIME(1); +EOF + +$CLICKHOUSE_CLIENT --query "SELECT '12 -> ', dictGetInt64('dictdb.dict', 'y', toUInt64(12))" + +$CLICKHOUSE_CLIENT --query "INSERT INTO dictdb.table VALUES (13, 103, now())" +$CLICKHOUSE_CLIENT --query "INSERT INTO dictdb.table VALUES (14, 104, now() - INTERVAL 1 DAY)" + +while [ $($CLICKHOUSE_CLIENT --query "SELECT dictGetInt64('dictdb.dict', 'y', toUInt64(13))") = -1 ] + do + sleep 0.5 + done + +$CLICKHOUSE_CLIENT --query "SELECT '13 -> ', dictGetInt64('dictdb.dict', 'y', toUInt64(13))" +$CLICKHOUSE_CLIENT --query "SELECT '14 -> ', dictGetInt64('dictdb.dict', 'y', toUInt64(14))" + +$CLICKHOUSE_CLIENT --query "SYSTEM RELOAD DICTIONARY 'dictdb.dict'" + +$CLICKHOUSE_CLIENT --query "SELECT '12(r) -> ', dictGetInt64('dictdb.dict', 'y', toUInt64(12))" +$CLICKHOUSE_CLIENT --query "SELECT '13(r) -> ', dictGetInt64('dictdb.dict', 'y', toUInt64(13))" +$CLICKHOUSE_CLIENT --query "SELECT '14(r) -> ', dictGetInt64('dictdb.dict', 'y', toUInt64(14))" + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS dictdb" diff --git a/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference b/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference new file mode 100644 index 00000000000..0196135bda0 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference @@ -0,0 +1,4 @@ +hello +world +21844 +xxx diff --git a/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql b/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql new file mode 100644 index 00000000000..afd1c1c5780 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql @@ -0,0 +1,27 @@ +DROP DATABASE IF EXISTS dictdb; +CREATE DATABASE dictdb Engine = Ordinary; + +CREATE TABLE dictdb.dicttbl(key Int64, value_default String, value_expression String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO dictdb.dicttbl VALUES (12, 'hello', '55:66:77'); + + +CREATE DICTIONARY dictdb.dict +( + key Int64 DEFAULT -1, + value_default String DEFAULT 'world', + value_expression String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)' + +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb')) +LAYOUT(FLAT()) +LIFETIME(1); + + +SELECT dictGetString('dictdb.dict', 'value_default', toUInt64(12)); +SELECT dictGetString('dictdb.dict', 'value_default', toUInt64(14)); + +SELECT dictGetString('dictdb.dict', 'value_expression', toUInt64(12)); +SELECT dictGetString('dictdb.dict', 'value_expression', toUInt64(14)); + +DROP DATABASE IF EXISTS dictdb; diff --git a/dbms/tests/queries/0_stateless/01043_geo_distance.reference b/dbms/tests/queries/0_stateless/01043_geo_distance.reference new file mode 100644 index 00000000000..543fa5f31a9 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01043_geo_distance.reference @@ -0,0 +1,8 @@ +111195.05 +111195.05 +110567.33 +111699.25 +10007554 +10007554 +10007554 
+10001780 diff --git a/dbms/tests/queries/0_stateless/01043_geo_distance.sql b/dbms/tests/queries/0_stateless/01043_geo_distance.sql new file mode 100644 index 00000000000..c1fb29b9eb7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01043_geo_distance.sql @@ -0,0 +1,11 @@ +SELECT greatCircleDistance(0., 0., 0., 1.); +SELECT greatCircleDistance(0., 89., 0, 90.); + +SELECT geoDistance(0., 0., 0., 1.); +SELECT geoDistance(0., 89., 0., 90.); + +SELECT greatCircleDistance(0., 0., 90., 0.); +SELECT greatCircleDistance(0., 0., 0., 90.); + +SELECT geoDistance(0., 0., 90., 0.); +SELECT geoDistance(0., 0., 0., 90.); diff --git a/dbms/tests/queries/0_stateless/01044_great_circle_angle.reference b/dbms/tests/queries/0_stateless/01044_great_circle_angle.reference new file mode 100644 index 00000000000..b0e80d4cdae --- /dev/null +++ b/dbms/tests/queries/0_stateless/01044_great_circle_angle.reference @@ -0,0 +1,101 @@ +1 +-179 -0.06 +-178 -0.02 +-177 -0.02 +-176 -0.01 +-174 -0.01 +174 -0.01 +176 -0.01 +177 -0.02 +178 -0.02 +179 -0.06 + +██ +████ +██████▏ +████████▎ +██████████▎ +████████████▍ +██████████████▍ +████████████████▌ +██████████████████▌ +████████████████████▌ +██████████████████████▋ +████████████████████████▋ +██████████████████████████▌ +████████████████████████████▍ +██████████████████████████████▍ +████████████████████████████████▎ +██████████████████████████████████▎ +████████████████████████████████████▏ +██████████████████████████████████████ +███████████████████████████████████████▊ +█████████████████████████████████████████▋ +███████████████████████████████████████████▌ +█████████████████████████████████████████████▎ +███████████████████████████████████████████████ +████████████████████████████████████████████████▋ +██████████████████████████████████████████████████▌ +████████████████████████████████████████████████████▏ +█████████████████████████████████████████████████████▋ +███████████████████████████████████████████████████████▍ +█████████████████████████████████████████████████████████ +██████████████████████████████████████████████████████████▌ +████████████████████████████████████████████████████████████ +█████████████████████████████████████████████████████████████▌ +██████████████████████████████████████████████████████████████▊ +████████████████████████████████████████████████████████████████▎ +█████████████████████████████████████████████████████████████████▌ +██████████████████████████████████████████████████████████████████▋ +████████████████████████████████████████████████████████████████████ +█████████████████████████████████████████████████████████████████████▏ +██████████████████████████████████████████████████████████████████████▎ +███████████████████████████████████████████████████████████████████████▎ +████████████████████████████████████████████████████████████████████████▎ +█████████████████████████████████████████████████████████████████████████▎ +██████████████████████████████████████████████████████████████████████████▏ +███████████████████████████████████████████████████████████████████████████ +███████████████████████████████████████████████████████████████████████████▋ +████████████████████████████████████████████████████████████████████████████▍ +█████████████████████████████████████████████████████████████████████████████ +█████████████████████████████████████████████████████████████████████████████▌ +█████████████████████████████████████████████████████████████████████████████▊ 
+██████████████████████████████████████████████████████████████████████████████▎ +██████████████████████████████████████████████████████████████████████████████▌ +██████████████████████████████████████████████████████████████████████████████▋ +██████████████████████████████████████████████████████████████████████████████▊ +██████████████████████████████████████████████████████████████████████████████▊ +██████████████████████████████████████████████████████████████████████████████▋ +██████████████████████████████████████████████████████████████████████████████▋ +██████████████████████████████████████████████████████████████████████████████▍ +██████████████████████████████████████████████████████████████████████████████ +█████████████████████████████████████████████████████████████████████████████▌ +█████████████████████████████████████████████████████████████████████████████ +████████████████████████████████████████████████████████████████████████████▎ +███████████████████████████████████████████████████████████████████████████▌ +██████████████████████████████████████████████████████████████████████████▌ +█████████████████████████████████████████████████████████████████████████▌ +████████████████████████████████████████████████████████████████████████▎ +███████████████████████████████████████████████████████████████████████ +█████████████████████████████████████████████████████████████████████▋ +████████████████████████████████████████████████████████████████████ +██████████████████████████████████████████████████████████████████▍ +████████████████████████████████████████████████████████████████▌ +██████████████████████████████████████████████████████████████▌ +████████████████████████████████████████████████████████████▍ +██████████████████████████████████████████████████████████▏ +███████████████████████████████████████████████████████▋ +█████████████████████████████████████████████████████▏ +██████████████████████████████████████████████████▍ +███████████████████████████████████████████████▌ +████████████████████████████████████████████▌ +█████████████████████████████████████████▎ +█████████████████████████████████████▊ +██████████████████████████████████▍ +██████████████████████████████▋ +██████████████████████████▋ +██████████████████████▋ +██████████████████▌ +██████████████▏ +█████████▌ +████▊ diff --git a/dbms/tests/queries/0_stateless/01044_great_circle_angle.sql b/dbms/tests/queries/0_stateless/01044_great_circle_angle.sql new file mode 100644 index 00000000000..0fac783ddca --- /dev/null +++ b/dbms/tests/queries/0_stateless/01044_great_circle_angle.sql @@ -0,0 +1,3 @@ +WITH number - 90 AS lat SELECT DISTINCT greatCircleAngle(0, 0, 0, lat) = abs(lat) FROM numbers(180); +WITH number - 180 AS lon SELECT lon, round(greatCircleAngle(0, 0, lon, 0) - abs(lon) AS err, 2) FROM numbers(360) WHERE abs(err) > 0.01; +SELECT bar((greatCircleAngle(0, 0, number, number) - number) * 100, 0, 2000, 100) FROM numbers(90); diff --git a/debian/changelog b/debian/changelog index 0ce071ce5e6..2064c577b8c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (19.18.1.1) unstable; urgency=low +clickhouse (19.19.1.1) unstable; urgency=low * Modified source code - -- clickhouse-release Fri, 08 Nov 2019 11:36:37 +0300 + -- clickhouse-release Fri, 06 Dec 2019 17:21:55 +0300 diff --git a/debian/clickhouse-server.postinst b/debian/clickhouse-server.postinst index c47a8ef4be2..4a1f4d9d387 100644 --- a/debian/clickhouse-server.postinst +++ b/debian/clickhouse-server.postinst @@ -4,6 +4,7 
@@ set -e CLICKHOUSE_USER=${CLICKHOUSE_USER:=clickhouse} CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP:=${CLICKHOUSE_USER}} +# Please note that we don't support paths with whitespace. This is rather ignorant. CLICKHOUSE_CONFDIR=${CLICKHOUSE_CONFDIR:=/etc/clickhouse-server} CLICKHOUSE_DATADIR=${CLICKHOUSE_DATADIR:=/var/lib/clickhouse} CLICKHOUSE_LOGDIR=${CLICKHOUSE_LOGDIR:=/var/log/clickhouse-server} @@ -135,6 +136,8 @@ Please fix this and reinstall this package." >&2 defaultpassword="$RET" if [ -n "$defaultpassword" ]; then echo "$defaultpassword" > ${CLICKHOUSE_CONFDIR}/users.d/default-password.xml + chown ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_CONFDIR}/users.d/default-password.xml + chmod 600 ${CLICKHOUSE_CONFDIR}/users.d/default-password.xml fi # everything went well, so now let's reset the password diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 556aee41a24..af6ef5b0395 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=19.18.1.* +ARG version=19.19.1.* RUN apt-get update \ && apt-get install --yes --no-install-recommends \ diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index ba47bb3d842..4757a20b622 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=19.18.1.* +ARG version=19.19.1.* ARG gosu_ver=1.10 RUN apt-get update \ diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index 5708284b15f..147213a77e3 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=19.18.1.* +ARG version=19.19.1.* RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/test/integration/Dockerfile b/docker/test/integration/Dockerfile index c5f4629ba72..b12cd1f84eb 100644 --- a/docker/test/integration/Dockerfile +++ b/docker/test/integration/Dockerfile @@ -4,7 +4,7 @@ FROM ubuntu:18.04 RUN echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list RUN apt-get update \ - && env DEBIAN_FRONTEND=noninteractive apt-get -y install tzdata python llvm-6.0 llvm-6.0-dev libreadline-dev libicu-dev bsdutils llvm-8 \ + && env DEBIAN_FRONTEND=noninteractive apt-get -y install tzdata python llvm-6.0 llvm-6.0-dev libreadline-dev libicu-dev bsdutils llvm-8 gdb \ && rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ diff --git a/docs/README.md b/docs/README.md index 5432b3e1824..fc7fabc268b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,13 +2,14 @@ ClickHouse uses the "documentation as code" approach, so you can edit Markdown files in this folder from the GitHub web interface. Alternatively, fork the ClickHouse repository, edit, commit, push, and open a pull request. -At the moment documentation is bilingual in English and Russian. Try to keep all languages in sync if you can, but this is not strictly required. There are people who are responsible for monitoring language versions and syncing them. If you add a new article, you should also add it to `toc_{en,ru,zh,fa}.yaml` files with the pages index. +At the moment the documentation is bilingual in English and Russian. Try to keep all languages in sync if you can, but this is not strictly required.
There are people who are responsible for monitoring language versions and syncing them. If you add a new article, you should also add it to `toc_{en,ru,zh,ja,fa}.yaml` files with the pages index. The master branch is then asynchronously published to the ClickHouse official website: * In English: https://clickhouse.yandex/docs/en/ * In Russian: https://clickhouse.yandex/docs/ru/ * In Chinese: https://clickhouse.yandex/docs/zh/ +* In Japanese: https://clickhouse.yandex/docs/ja/ * In Farsi: https://clickhouse.yandex/docs/fa/ The infrastructure to build Markdown for publishing on the documentation website resides in the [tools](tools) folder. It has its own [README.md](tools/README.md) file with more details. diff --git a/docs/en/data_types/special_data_types/interval.md b/docs/en/data_types/special_data_types/interval.md index 73a9ced5eb3..b0d152e69bd 100644 --- a/docs/en/data_types/special_data_types/interval.md +++ b/docs/en/data_types/special_data_types/interval.md @@ -3,11 +3,11 @@ The family of data types representing time and date intervals. The resulting types of the [INTERVAL](../../query_language/operators.md#operator-interval) operator. !!! warning "Warning" - You can't use the `Interval` data types for storing values in tables. + You can't use `Interval` data types for storing values in tables. Structure: -- Time interval as unsigned integer value. +- Time interval as an unsigned integer value. - Type of an interval. Supported interval types: @@ -21,7 +21,7 @@ Supported interval types: - `QUARTER` - `YEAR` -For each interval type, there is the separated data type. For example, the `DAY` interval is expressed as the `IntervalDay` data type: +For each interval type, there is a separate data type. For example, the `DAY` interval is expressed as the `IntervalDay` data type: ```sql SELECT toTypeName(INTERVAL 4 DAY) @@ -45,9 +45,9 @@ SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY └─────────────────────┴───────────────────────────────┘ ``` -Intervals of different types can't be combined. You can't use intervals like `4 DAY 1 HOUR`, express intervals in the units that smaller or equal the the smallest unit of the interval. For example, `1 day and an hour` interval can be expressed as `25 HOUR` or `90000 SECOND`. +Intervals with different types can't be combined. You can't use intervals like `4 DAY 1 HOUR`. Express intervals in units that are smaller than or equal to the smallest unit of the interval; for example, the `1 day and an hour` interval can be expressed as `25 HOUR` or `90000 SECOND`. -You can't perform arithmetical operations with the `Interval`-type values, but you can add intervals of different types consequently to some value. For example: +You can't perform arithmetical operations with `Interval`-type values, but you can add intervals of different types consecutively to values in `Date` or `DateTime` data types.
For example: ```sql SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR @@ -58,7 +58,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL └─────────────────────┴────────────────────────────────────────────────────────┘ ``` -The following query causes the exception: +The following query causes an exception: ```sql select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) diff --git a/docs/en/development/build_cross_arm.md b/docs/en/development/build_cross_arm.md new file mode 100644 index 00000000000..4474c72c3f0 --- /dev/null +++ b/docs/en/development/build_cross_arm.md @@ -0,0 +1,35 @@ +# How to Build ClickHouse on Linux for AARCH64 (ARM64) architecture + +This is for the case when you have a Linux machine and want to use it to build a `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers. + +The cross-build for AARCH64 is based on the [Build instructions](build.md); follow them first. + +# Install Clang-8 + +Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. +For example, in Ubuntu Bionic you can use the following commands: + +```bash +echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee -a /etc/apt/sources.list +sudo apt-get update +sudo apt-get install clang-8 +``` + +# Install Cross-Compilation Toolset + +```bash +cd ClickHouse +cd cmake/toolchain/linux-aarch64 +wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz +tar --strip-components=1 -xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz +``` + +# Build ClickHouse + +```bash +cd ClickHouse +mkdir build-arm64 +CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake +ninja -C build-arm64 +``` + +The resulting binary will run only on Linux with the AARCH64 CPU architecture. diff --git a/docs/en/development/build_cross.md b/docs/en/development/build_cross_osx.md similarity index 78% rename from docs/en/development/build_cross.md rename to docs/en/development/build_cross_osx.md index 61f0acf5b76..d204620f2a8 100644 --- a/docs/en/development/build_cross.md +++ b/docs/en/development/build_cross_osx.md @@ -2,7 +2,7 @@ This is for the case when you have a Linux machine and want to use it to build a `clickhouse` binary that will run on OS X. This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with another instruction: https://clickhouse.yandex/docs/en/development/build_osx/ -The cross-build for Mac OS X is based on the Build instructions, follow them first. +The cross-build for Mac OS X is based on the [Build instructions](build.md); follow them first. # Install Clang-8 @@ -31,10 +31,15 @@ git clone https://github.com/tpoechtrager/cctools-port.git cd cctools-port/cctools ./configure --prefix=${CCTOOLS} --with-libtapi=${CCTOOLS} --target=x86_64-apple-darwin make install +``` -cd ${CCTOOLS} +Also, we need to download the Mac OS X SDK into the working tree.
+ +```bash +cd ClickHouse +cd cmake/toolchain/darwin-x86_64 +wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz -tar xJf MacOSX10.14.sdk.tar.xz +tar --strip-components=1 -xJf MacOSX10.14.sdk.tar.xz ``` # Build ClickHouse @@ -42,11 +47,10 @@ tar xJf MacOSX10.14.sdk.tar.xz ```bash cd ClickHouse mkdir build-osx -CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_SYSTEM_NAME=Darwin \ +CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake \ -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \ -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \ - -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld \ - -DSDK_PATH=${CCTOOLS}/MacOSX10.14.sdk + -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld ninja -C build-osx ``` diff --git a/docs/en/getting_started/example_datasets/metrica.md b/docs/en/getting_started/example_datasets/metrica.md index 19947273338..d89fe54f4eb 100644 --- a/docs/en/getting_started/example_datasets/metrica.md +++ b/docs/en/getting_started/example_datasets/metrica.md @@ -1,51 +1,62 @@ # Anonymized Yandex.Metrica Data -Dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. Each of the tables can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as [TSV](https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz) and as [prepared partitions](https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz). +The dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. You can read more about Yandex.Metrica in the [ClickHouse history](../../introduction/history.md) section. + +Either of the tables can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz.
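+
+For example, the extended 100-million-row table can be fetched as prepared partitions in the same way as `hits_v1` below (a sketch; it assumes the archive unpacks into the `datasets` database and that the table inside is named `hits_100m_obfuscated`):
+
+``` bash
+curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
+tar -xJf hits_100m_obfuscated_v1.tar.xz -C /var/lib/clickhouse # path to ClickHouse data directory
+# check permissions on unpacked data, fix if required
+sudo service clickhouse-server restart
+clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_100m_obfuscated"
+```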
## Obtaining Tables from Prepared Partitions -**Download and import hits:** -```bash -$ curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar -$ tar xvf hits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory -$ # check permissions on unpacked data, fix if required -$ sudo service clickhouse-server restart -$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" + +Download and import hits table: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar +tar xvf hits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" ``` -**Download and import visits:** -```bash -$ curl -O https://clickhouse-datasets.s3.yandex.net/visits/partitions/visits_v1.tar -$ tar xvf visits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory -$ # check permissions on unpacked data, fix if required -$ sudo service clickhouse-server restart -$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" +Download and import visits: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/visits/partitions/visits_v1.tar +tar xvf visits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" ``` -## Obtaining Tables from Compressed tsv-file -**Download and import hits from compressed tsv-file** -```bash -$ curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv -$ # now create table -$ clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" -$ clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry 
FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" -$ # import data -$ cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 -$ # optionally you can optimize table -$ clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" -$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +## Obtaining Tables from Compressed TSV File + +Download and import hits from compressed TSV file: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, 
HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" +# import data +cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" ``` -**Download and import visits from compressed tsv-file** -```bash -$ curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv -$ # now create table -$ clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" -$ clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, 
GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign)" -$ # import data -$ cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000 -$ # optionally you can optimize table -$ clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL" -$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" +Download and import visits from compressed tsv-file: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), 
URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign)" +# import data +cat visits_v1.tsv | 
clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" ``` -## Queries -Examples of queries to these tables (they are named `test.hits` and `test.visits`) can be found among [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/queries/1_stateful) and in some [performance tests](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/performance) of ClickHouse. +## Example Queries + +[ClickHouse tutorial](../../getting_started/tutorial.md) is based on Yandex.Metrica dataset and the recommended way to get started with this dataset is to just go through tutorial. + +Additional examples of queries to these tables can be found among [stateful tests](https://github.com/yandex/ClickHouse/tree/master/dbms/tests/queries/1_stateful) of ClickHouse (they are named `test.hists` and `test.visits` there). diff --git a/docs/en/getting_started/index.md b/docs/en/getting_started/index.md index ed7335b748b..bfdcb0e108a 100644 --- a/docs/en/getting_started/index.md +++ b/docs/en/getting_started/index.md @@ -1,147 +1,8 @@ # Getting Started -## System Requirements - -ClickHouse can run on any Linux, FreeBSD or Mac OS X with x86\_64 CPU architecture. - -Though pre-built binaries are typically compiled to leverage SSE 4.2 instruction set, so unless otherwise stated usage of CPU that supports it becomes an additional system requirement. Here's the command to check if current CPU has support for SSE 4.2: - -``` bash -$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported" -``` - -## Installation - -### From DEB Packages - -Yandex ClickHouse team recommends using official pre-compiled `deb` packages for Debian or Ubuntu. - -To install official packages add the Yandex repository in `/etc/apt/sources.list` or in a separate `/etc/apt/sources.list.d/clickhouse.list` file: - -```bash -$ deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ -``` - -If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments). - -Then run these commands to actually install packages: - -```bash -$ sudo apt-get install dirmngr # optional -$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional -$ sudo apt-get update -$ sudo apt-get install clickhouse-client clickhouse-server -``` - -You can also download and install packages manually from here: . - -### From RPM Packages - -Yandex ClickHouse team recommends using official pre-compiled `rpm` packages for CentOS, RedHat and all other rpm-based Linux distributions. - -First you need to add the official repository: - -```bash -$ sudo yum install yum-utils -$ sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG -$ sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64 -``` - -If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments). - -Then run these commands to actually install packages: - -```bash -$ sudo yum install clickhouse-server clickhouse-client -``` - -You can also download and install packages manually from here: . - -### From Docker Image - -To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). 
Those images use official `deb` packages inside.
-
-### From Sources
-
-To manually compile ClickHouse, follow the instructions for [Linux](../development/build.md) or [Mac OS X](../development/build_osx.md).
-
-You can compile packages and install them or use programs without installing packages. Also by building manually you can disable SSE 4.2 requirement or build for AArch64 CPUs.
-
-```text
-Client: dbms/programs/clickhouse-client
-Server: dbms/programs/clickhouse-server
-```
-
-You'll need to create a data and metadata folders and `chown` them for the desired user. Their paths can be changed in server config (src/dbms/programs/server/config.xml), by default they are:
-```text
-/opt/clickhouse/data/default/
-/opt/clickhouse/metadata/default/
-```
-
-On Gentoo you can just use `emerge clickhouse` to install ClickHouse from sources.
-
-## Launch
-
-To start the server as a daemon, run:
-
-``` bash
-$ sudo service clickhouse-server start
-```
-
-If you don't have `service` command, run as
-
-``` bash
-$ sudo /etc/init.d/clickhouse-server start
-```
-
-
-See the logs in the `/var/log/clickhouse-server/` directory.
-
-If the server doesn't start, check the configurations in the file `/etc/clickhouse-server/config.xml`.
-
-You can also manually launch the server from the console:
-
-``` bash
-$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
-```
-
-In this case, the log will be printed to the console, which is convenient during development.
-If the configuration file is in the current directory, you don't need to specify the `--config-file` parameter. By default, it uses `./config.xml`.
-
-ClickHouse supports access restriction settings. They are located in the `users.xml` file (next to `config.xml`).
-By default, access is allowed from anywhere for the `default` user, without a password. See `user/default/networks`.
-For more information, see the section ["Configuration Files"](../operations/configuration_files.md).
-
-After launching server, you can use the command-line client to connect to it:
-
-``` bash
-$ clickhouse-client
-```
-
-By default it connects to `localhost:9000` on behalf of the user `default` without a password. It can also be used to connect to a remote server using `--host` argument.
-
-The terminal must use UTF-8 encoding.
-For more information, see the section ["Command-line client"](../interfaces/cli.md).
-
-Example:
-``` bash
-$ ./clickhouse-client
-ClickHouse client version 0.0.18749.
-Connecting to localhost:9000.
-Connected to ClickHouse server version 0.0.18749.
-```
-```sql
-SELECT 1
-```
-```text
-┌─1─┐
-│ 1 │
-└───┘
-```
-
-**Congratulations, the system works!**
-
-To continue experimenting, you can download one of test data sets or go through [tutorial](https://clickhouse.yandex/tutorial.html).
+If you are new to ClickHouse and want to get a hands-on feeling of its performance, first you need to go through the [installation process](install.md). After that you can:
+* [Go through the detailed tutorial](tutorial.md)
+* [Experiment with example datasets](example_datasets/ontime.md)

 [Original article](https://clickhouse.yandex/docs/en/getting_started/) 
diff --git a/docs/en/getting_started/install.md b/docs/en/getting_started/install.md
new file mode 100644
index 00000000000..e47500fa22f
--- /dev/null
+++ b/docs/en/getting_started/install.md
@@ -0,0 +1,153 @@
+# Installation
+
+## System Requirements
+
+ClickHouse can run on any Linux, FreeBSD or Mac OS X with x86\_64, AArch64 or PowerPC64LE CPU architecture.
+
+Official pre-built binaries are typically compiled for x86\_64 and leverage the SSE 4.2 instruction set, so unless otherwise stated, using a CPU that supports it becomes an additional system requirement. Here's the command to check whether the current CPU supports SSE 4.2:
+
+``` bash
+$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
+```
+
+To run ClickHouse on processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should [build ClickHouse from sources](#from-sources) with proper configuration adjustments.
+
+## Available Installation Options
+
+### From DEB Packages
+
+It is recommended to use official pre-compiled `deb` packages for Debian or Ubuntu.
+
+To install official packages, add the Yandex repository in `/etc/apt/sources.list` or in a separate `/etc/apt/sources.list.d/clickhouse.list` file:
+
+```
+deb http://repo.yandex.ru/clickhouse/deb/stable/ main/
+```
+
+If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments).
+
+Then run these commands to actually install packages:
+
+```bash
+sudo apt-get install dirmngr # optional
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional
+sudo apt-get update
+sudo apt-get install clickhouse-client clickhouse-server
+```
+
+You can also download and install packages manually from here: <https://repo.yandex.ru/clickhouse/deb/stable/main/>.
+
+### From RPM Packages
+
+It is recommended to use official pre-compiled `rpm` packages for CentOS, RedHat and all other rpm-based Linux distributions.
+
+First you need to add the official repository:
+
+```bash
+sudo yum install yum-utils
+sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG
+sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64
+```
+
+If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments).
+
+Then run these commands to actually install packages:
+
+```bash
+sudo yum install clickhouse-server clickhouse-client
+```
+
+You can also download and install packages manually from here: <https://repo.yandex.ru/clickhouse/rpm/stable/x86_64>.
+
+### From Docker Image
+
+To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Those images use official `deb` packages inside.
+
+### From Sources
+
+To manually compile ClickHouse, follow the instructions for [Linux](../development/build.md) or [Mac OS X](../development/build_osx.md).
+
+You can compile packages and install them, or use programs without installing packages. Also, by building manually you can disable the SSE 4.2 requirement or build for AArch64 CPUs.
+
+```
+Client: dbms/programs/clickhouse-client
+Server: dbms/programs/clickhouse-server
+```
+
+You'll need to create data and metadata folders and `chown` them for the desired user. Their paths can be changed in the server config (src/dbms/programs/server/config.xml); by default they are:
+```
+/opt/clickhouse/data/default/
+/opt/clickhouse/metadata/default/
+```
+
+On Gentoo you can just use `emerge clickhouse` to install ClickHouse from sources.
+
+## Launch
+
+To start the server as a daemon, run:
+
+``` bash
+$ sudo service clickhouse-server start
+```
+
+If you don't have the `service` command, run it as
+
+``` bash
+$ sudo /etc/init.d/clickhouse-server start
+```
+
+
+See the logs in the `/var/log/clickhouse-server/` directory.
+
+If the server doesn't start, check the configurations in the file `/etc/clickhouse-server/config.xml`.
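+
+The server log usually explains why startup failed. As a quick sketch (assuming the default log paths, which are configurable in `config.xml`), you can inspect the most recent errors with:
+
+``` bash
+# show the last lines of the error log
+sudo tail -n 50 /var/log/clickhouse-server/clickhouse-server.err.log
+```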
+
+You can also manually launch the server from the console:
+
+``` bash
+$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
+```
+
+In this case, the log will be printed to the console, which is convenient during development.
+If the configuration file is in the current directory, you don't need to specify the `--config-file` parameter. By default, it uses `./config.xml`.
+
+ClickHouse supports access restriction settings. They are located in the `users.xml` file (next to `config.xml`).
+By default, access is allowed from anywhere for the `default` user, without a password. See `user/default/networks`.
+For more information, see the section ["Configuration Files"](../operations/configuration_files.md).
+
+After launching the server, you can use the command-line client to connect to it:
+
+``` bash
+$ clickhouse-client
+```
+
+By default it connects to `localhost:9000` on behalf of the user `default` without a password. It can also be used to connect to a remote server using the `--host` argument.
+
+The terminal must use UTF-8 encoding.
+For more information, see the section ["Command-line client"](../interfaces/cli.md).
+
+Example:
+``` bash
+$ ./clickhouse-client
+ClickHouse client version 0.0.18749.
+Connecting to localhost:9000.
+Connected to ClickHouse server version 0.0.18749.
+
+:) SELECT 1
+
+SELECT 1
+
+┌─1─┐
+│ 1 │
+└───┘
+
+1 rows in set. Elapsed: 0.003 sec.
+
+:)
+```
+
+**Congratulations, the system works!**
+
+To continue experimenting, you can download one of the test data sets or go through the [tutorial](https://clickhouse.yandex/tutorial.html).
+
+
+[Original article](https://clickhouse.yandex/docs/en/getting_started/install/)
diff --git a/docs/en/getting_started/tutorial.md b/docs/en/getting_started/tutorial.md
new file mode 100644
index 00000000000..acdd9074beb
--- /dev/null
+++ b/docs/en/getting_started/tutorial.md
@@ -0,0 +1,645 @@
+# ClickHouse Tutorial
+
+## What to Expect from This Tutorial?
+
+By going through this tutorial, you'll learn how to set up a basic ClickHouse cluster; it'll be small, but fault-tolerant and scalable. We will use one of the example datasets to fill it with data and execute some demo queries.
+
+## Single Node Setup
+
+To postpone the complexities of a distributed environment, we'll start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](index.md#from-deb-packages) or [rpm](index.md#from-rpm-packages) packages, but there are [alternatives](index.md#from-docker-image) for the operating systems that do not support them.
+
+For example, suppose you have chosen `deb` packages and executed:
+``` bash
+sudo apt-get install dirmngr
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
+
+echo "deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" | sudo tee /etc/apt/sources.list.d/clickhouse.list
+sudo apt-get update
+
+sudo apt-get install -y clickhouse-server clickhouse-client
+```
+
+What do we have in the packages that got installed:
+
+* The `clickhouse-client` package contains the [clickhouse-client](../interfaces/cli.md) application, an interactive ClickHouse console client.
+* The `clickhouse-common` package contains the ClickHouse executable file.
+* The `clickhouse-server` package contains configuration files to run ClickHouse as a server.
+
+Server config files are located in `/etc/clickhouse-server/`. Before going further, please notice the `<path>` element in `config.xml`.
This path determines the location for data storage, so it should be on a volume with large disk capacity; the default value is `/var/lib/clickhouse/`. If you want to adjust the configuration, it's not really handy to edit the `config.xml` file directly, considering it might get rewritten on future package updates. The recommended way to override config elements is to create [files in the config.d directory](../operations/configuration_files.md) which serve as "patches" to config.xml (a minimal override example is sketched after the client tips below).
+
+As you might have noticed, `clickhouse-server` is not launched automatically after package installation. It won't be automatically restarted after updates either. The way you start the server depends on your init system, usually it's:
+
+``` bash
+sudo service clickhouse-server start
+```
+or
+
+``` bash
+sudo /etc/init.d/clickhouse-server start
+```
+
+The default location for server logs is `/var/log/clickhouse-server/`. The server is ready to handle client connections once the `Ready for connections` message is logged.
+
+Once the `clickhouse-server` is up and running, we can use `clickhouse-client` to connect to the server and run some test queries like `SELECT 'Hello, world!';` (note that ClickHouse uses single quotes for string literals).
+
+<details markdown="1"><summary>Quick tips for clickhouse-client</summary>
+Interactive mode:
+``` bash
+clickhouse-client
+clickhouse-client --host=... --port=... --user=... --password=...
+```
+
+Enable multiline queries:
+``` bash
+clickhouse-client -m
+clickhouse-client --multiline
+```
+
+Run queries in batch-mode:
+``` bash
+clickhouse-client --query='SELECT 1'
+echo 'SELECT 1' | clickhouse-client
+clickhouse-client <<< 'SELECT 1'
+```
+
+Insert data from a file in specified format:
+``` bash
+clickhouse-client --query='INSERT INTO table VALUES' < data.txt
+clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
+```
+</details>
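+
+As a minimal sketch of the `config.d` override mechanism mentioned above (the file name and data path below are hypothetical), a file like `/etc/clickhouse-server/config.d/data_path.xml` could move the data directory to a bigger volume:
+
+``` xml
+<yandex>
+    <!-- overrides the <path> element from config.xml -->
+    <path>/mnt/bigdisk/clickhouse/</path>
+</yandex>
+```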
+
+## Import Sample Dataset
+
+Now it's time to fill our ClickHouse server with some sample data. In this tutorial we'll use anonymized data of Yandex.Metrica, the first service that ran ClickHouse in production, way before it became open source (more on that in the [history section](../introduction/history.md)). There are [multiple ways to import the Yandex.Metrica dataset](example_datasets/metrica.md), and for the sake of the tutorial we'll go with the most realistic one.
+
+### Download and Extract Table Data
+
+``` bash
+curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
+curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
+```
+
+The extracted files are about 10GB in size.
+
+### Create Tables
+
+Tables are logically grouped into "databases". There's a `default` database, but we'll create a new one named `tutorial`:
+
+``` bash
+clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
+```
+
+The syntax for creating tables is way more complicated compared to databases (see the [reference](../query_language/create.md)). In general, a `CREATE TABLE` statement has to specify three key things (a bare-bones skeleton is sketched below, after the table descriptions):
+
+1. The name of the table to create.
+2. The table schema, i.e. the list of columns and their [data types](../data_types/index.md).
+3. The [table engine](../operations/table_engines/index.md) and its settings, which determine all the details of how queries to this table will be physically executed.
+
+Yandex.Metrica is a web analytics service, and the sample dataset doesn't cover its full functionality, so there are only two tables to create:
+
+* `hits` is a table with each action done by all users on all websites covered by the service.
+* `visits` is a table that contains pre-built sessions instead of individual actions.
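+
+As a bare-bones illustration of those three parts (with a hypothetical table name and columns), a minimal statement looks like this:
+
+``` sql
+CREATE TABLE tutorial.example  -- 1. table name
+(
+    `id` UInt64,               -- 2. schema: columns and their types
+    `s` String
+)
+ENGINE = MergeTree()           -- 3. table engine and its settings
+ORDER BY id
+```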
+ +Let's see and execute the real create table queries for these tables: + +``` sql +CREATE TABLE tutorial.hits_v1 +( + `WatchID` UInt64, + `JavaEnable` UInt8, + `Title` String, + `GoodEvent` Int16, + `EventTime` DateTime, + `EventDate` Date, + `CounterID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RegionID` UInt32, + `UserID` UInt64, + `CounterClass` Int8, + `OS` UInt8, + `UserAgent` UInt8, + `URL` String, + `Referer` String, + `URLDomain` String, + `RefererDomain` String, + `Refresh` UInt8, + `IsRobot` UInt8, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `FlashMinor2` String, + `NetMajor` UInt8, + `NetMinor` UInt8, + `UserAgentMajor` UInt16, + `UserAgentMinor` FixedString(2), + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `MobilePhone` UInt8, + `MobilePhoneModel` String, + `Params` String, + `IPNetworkID` UInt32, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `IsArtifical` UInt8, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `ClientTimeZone` Int16, + `ClientEventTime` DateTime, + `SilverlightVersion1` UInt8, + `SilverlightVersion2` UInt8, + `SilverlightVersion3` UInt32, + `SilverlightVersion4` UInt16, + `PageCharset` String, + `CodeVersion` UInt32, + `IsLink` UInt8, + `IsDownload` UInt8, + `IsNotBounce` UInt8, + `FUniqID` UInt64, + `HID` UInt32, + `IsOldCounter` UInt8, + `IsEvent` UInt8, + `IsParameter` UInt8, + `DontCountHits` UInt8, + `WithHash` UInt8, + `HitColor` FixedString(1), + `UTCEventTime` DateTime, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `WindowName` Int32, + `OpenerName` Int32, + `HistoryLength` Int16, + `BrowserLanguage` FixedString(2), + `BrowserCountry` FixedString(2), + `SocialNetwork` String, + `SocialAction` String, + `HTTPError` UInt16, + `SendTiming` Int32, + `DNSTiming` Int32, + `ConnectTiming` Int32, + `ResponseStartTiming` Int32, + `ResponseEndTiming` Int32, + `FetchTiming` Int32, + `RedirectTiming` Int32, + `DOMInteractiveTiming` Int32, + `DOMContentLoadedTiming` Int32, + `DOMCompleteTiming` Int32, + `LoadEventStartTiming` Int32, + `LoadEventEndTiming` Int32, + `NSToDOMContentLoadedTiming` Int32, + `FirstPaintTiming` Int32, + `RedirectCount` Int8, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `ParamPrice` Int64, + `ParamOrderID` String, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `GoalsReached` Array(UInt32), + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `RefererHash` UInt64, + `URLHash` UInt64, + `CLID` UInt32, + `YCLID` UInt64, + `ShareService` String, + `ShareURL` String, + `ShareTitle` String, + `ParsedParams` Nested( + Key1 String, + Key2 String, + Key3 String, + Key4 String, + Key5 String, + ValueDouble Float64), + `IslandID` FixedString(16), + `RequestNum` UInt32, + `RequestTry` UInt8 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS 
index_granularity = 8192 +``` + +``` sql +CREATE TABLE tutorial.visits_v1 +( + `CounterID` UInt32, + `StartDate` Date, + `Sign` Int8, + `IsNew` UInt8, + `VisitID` UInt64, + `UserID` UInt64, + `StartTime` DateTime, + `Duration` UInt32, + `UTCStartTime` DateTime, + `PageViews` Int32, + `Hits` Int32, + `IsBounce` UInt8, + `Referer` String, + `StartURL` String, + `RefererDomain` String, + `StartURLDomain` String, + `EndURL` String, + `LinkURL` String, + `IsDownload` UInt8, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `PlaceID` Int32, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `IsYandex` UInt8, + `GoalReachesDepth` Int32, + `GoalReachesURL` Int32, + `GoalReachesAny` Int32, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `MobilePhoneModel` String, + `ClientEventTime` DateTime, + `RegionID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `IPNetworkID` UInt32, + `SilverlightVersion3` UInt32, + `CodeVersion` UInt32, + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `UserAgentMajor` UInt16, + `UserAgentMinor` UInt16, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `SilverlightVersion2` UInt8, + `SilverlightVersion4` UInt16, + `FlashVersion3` UInt16, + `FlashVersion4` UInt16, + `ClientTimeZone` Int16, + `OS` UInt8, + `UserAgent` UInt8, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `NetMajor` UInt8, + `NetMinor` UInt8, + `MobilePhone` UInt8, + `SilverlightVersion1` UInt8, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `JavaEnable` UInt8, + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `BrowserLanguage` UInt16, + `BrowserCountry` UInt16, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `Params` Array(String), + `Goals` Nested( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32), + `WatchIDs` Array(UInt64), + `ParamSumPrice` Int64, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `ClickLogID` UInt64, + `ClickEventID` Int32, + `ClickGoodEvent` Int32, + `ClickEventTime` DateTime, + `ClickPriorityID` Int32, + `ClickPhraseID` Int32, + `ClickPageID` Int32, + `ClickPlaceID` Int32, + `ClickTypeID` Int32, + `ClickResourceID` Int32, + `ClickCost` UInt32, + `ClickClientIP` UInt32, + `ClickDomainID` UInt32, + `ClickURL` String, + `ClickAttempt` UInt8, + `ClickOrderID` UInt32, + `ClickBannerID` UInt32, + `ClickMarketCategoryID` UInt32, + `ClickMarketPP` UInt32, + `ClickMarketCategoryName` String, + `ClickMarketPPName` String, + `ClickAWAPSCampaignName` String, + `ClickPageName` String, + `ClickTargetType` UInt16, + `ClickTargetPhraseID` UInt64, + `ClickContextType` UInt8, + `ClickSelectType` Int8, + `ClickOptions` String, + `ClickGroupBannerID` Int32, + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `FirstVisit` DateTime, + `PredLastVisit` Date, + `LastVisit` Date, + `TotalVisits` UInt32, + `TraficSource` Nested( + ID Int8, + SearchEngineID UInt16, + AdvEngineID UInt8, + PlaceID UInt16, + SocialSourceNetworkID UInt8, + Domain String, + SearchPhrase String, + SocialSourcePage String), + 
`Attendance` FixedString(16),
+    `CLID` UInt32,
+    `YCLID` UInt64,
+    `NormalizedRefererHash` UInt64,
+    `SearchPhraseHash` UInt64,
+    `RefererDomainHash` UInt64,
+    `NormalizedStartURLHash` UInt64,
+    `StartURLDomainHash` UInt64,
+    `NormalizedEndURLHash` UInt64,
+    `TopLevelDomain` UInt64,
+    `URLScheme` UInt64,
+    `OpenstatServiceNameHash` UInt64,
+    `OpenstatCampaignIDHash` UInt64,
+    `OpenstatAdIDHash` UInt64,
+    `OpenstatSourceIDHash` UInt64,
+    `UTMSourceHash` UInt64,
+    `UTMMediumHash` UInt64,
+    `UTMCampaignHash` UInt64,
+    `UTMContentHash` UInt64,
+    `UTMTermHash` UInt64,
+    `FromHash` UInt64,
+    `WebVisorEnabled` UInt8,
+    `WebVisorActivity` UInt32,
+    `ParsedParams` Nested(
+        Key1 String,
+        Key2 String,
+        Key3 String,
+        Key4 String,
+        Key5 String,
+        ValueDouble Float64),
+    `Market` Nested(
+        Type UInt8,
+        GoalID UInt32,
+        OrderID String,
+        OrderPrice Int64,
+        PP UInt32,
+        DirectPlaceID UInt32,
+        DirectOrderID UInt32,
+        DirectBannerID UInt32,
+        GoodID String,
+        GoodName String,
+        GoodQuantity Int32,
+        GoodPrice Int64),
+    `IslandID` FixedString(16)
+)
+ENGINE = CollapsingMergeTree(Sign)
+PARTITION BY toYYYYMM(StartDate)
+ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
+SAMPLE BY intHash32(UserID)
+SETTINGS index_granularity = 8192
+```
+
+You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
+
+As we can see, `hits_v1` uses the [basic MergeTree engine](../operations/table_engines/mergetree.md), while `visits_v1` uses the [Collapsing](../operations/table_engines/collapsingmergetree.md) variant.
+
+### Import Data
+
+Data import to ClickHouse is done via an [INSERT INTO](../query_language/insert_into.md) query, like in many other SQL databases. However, data is usually provided in one of the [supported formats](../interfaces/formats.md) instead of a `VALUES` clause (which is also supported).
+
+The files we downloaded earlier are in tab-separated format, so here's how to import them via the console client:
+
+``` bash
+clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv
+clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv
+```
+
+ClickHouse has a lot of [settings to tune](../operations/settings/index.md), and one way to specify them in the console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what they mean and what the defaults are is to query the `system.settings` table:
+
+``` sql
+SELECT name, value, changed, description
+FROM system.settings
+WHERE name LIKE '%max_insert_b%'
+FORMAT TSV
+
+max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
+```
+
+Optionally you can [OPTIMIZE](../query_language/misc/#misc_operations-optimize) the tables after import. Tables that are configured with a MergeTree-family engine always merge data parts in the background to optimize data storage (or at least check if it makes sense).
These queries just force the table engine to do storage optimization right now instead of some time later:
+``` bash
+clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
+clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL"
+```
+
+This is an I/O and CPU intensive operation, so if the table constantly receives new data it's better to leave it alone and let merges run in the background.
+
+Now we can check that the tables were successfully imported:
+``` bash
+clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1"
+clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1"
+```
+
+## Example Queries
+
+``` sql
+SELECT
+    StartURL AS URL,
+    AVG(Duration) AS AvgDuration
+FROM tutorial.visits_v1
+WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30'
+GROUP BY URL
+ORDER BY AvgDuration DESC
+LIMIT 10
+```
+
+``` sql
+SELECT
+    sum(Sign) AS visits,
+    sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits,
+    (100. * goal_visits) / visits AS goal_percent
+FROM tutorial.visits_v1
+WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru')
+```
+
+## Cluster Deployment
+
+A ClickHouse cluster is a homogeneous cluster. Steps to set it up:
+
+1. Install ClickHouse server on all machines of the cluster
+2. Set up cluster configs in configuration files
+3. Create local tables on each instance
+4. Create a [Distributed table](../operations/table_engines/distributed.md)
+
+A [Distributed table](../operations/table_engines/distributed.md) is actually a kind of "view" over the local tables of a ClickHouse cluster. A SELECT query from a distributed table is executed using the resources of all the cluster's shards. You may specify configs for multiple clusters and create multiple distributed tables providing views into different clusters.
+
+Example config for a cluster with three shards, one replica each:
+``` xml
+<remote_servers>
+    <perftest_3shards_1replicas>
+        <shard>
+            <replica>
+                <host>example-perftest01j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <replica>
+                <host>example-perftest02j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <replica>
+                <host>example-perftest03j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </perftest_3shards_1replicas>
+</remote_servers>
+```
+
+For further demonstration, let's create a new local table with exactly the same `CREATE TABLE` query that we used for `hits_v1`, but a different table name:
+``` sql
+CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
+```
+
+Creating a distributed table providing a view into the local tables of the cluster:
+``` sql
+CREATE TABLE tutorial.hits_all AS tutorial.hits_local
+ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
+```
+
+A common practice is to create similar Distributed tables on all machines of the cluster. This allows running distributed queries on any machine of the cluster. There's also an alternative option: create a temporary distributed table for a given SELECT query using the [remote](../query_language/table_functions/remote.md) table function.
+
+Let's run [INSERT SELECT](../query_language/insert_into.md) into the Distributed table to spread the table to multiple servers.
+
+``` sql
+INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1;
+```
+
+!!! warning "Notice"
+    This approach is not suitable for sharding of large tables. There's a separate tool, [clickhouse-copier](../operations/utils/clickhouse-copier.md), that can re-shard arbitrarily large tables.
+
+As you would expect, computationally heavy queries run N times faster when launched on three servers instead of one.
+
+In this case, we have used a cluster with three shards, each containing a single replica.
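+
+For example, a simple aggregation over the distributed table transparently fans out to all three shards (a sketch, assuming `tutorial.hits_all` was created and filled as above):
+
+``` sql
+SELECT count() FROM tutorial.hits_all
+```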
+
+To provide resilience in a production environment, we recommend that each shard contain 2-3 replicas spread between multiple data centers. Note that ClickHouse supports an unlimited number of replicas.
+
+Example config for a cluster of one shard containing three replicas:
+``` xml
+<remote_servers>
+    ...
+    <perftest_1shards_3replicas>
+        <shard>
+            <replica>
+                <host>example-perftest01j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example-perftest02j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example-perftest03j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </perftest_1shards_3replicas>
+</remote_servers>
+```
+
+To enable native replication, ZooKeeper is required. ClickHouse takes care of data consistency on all replicas and runs the restore procedure after failure automatically. It's recommended to deploy the ZooKeeper cluster on separate servers.
+
+ZooKeeper is not a strict requirement: in some simple cases you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended; in this case ClickHouse won't be able to guarantee data consistency on all replicas. This remains the responsibility of your application.
+
+ZooKeeper locations need to be specified in the configuration file:
+``` xml
+<zookeeper>
+    <node>
+        <host>zoo01.yandex.ru</host>
+        <port>2181</port>
+    </node>
+    <node>
+        <host>zoo02.yandex.ru</host>
+        <port>2181</port>
+    </node>
+    <node>
+        <host>zoo03.yandex.ru</host>
+        <port>2181</port>
+    </node>
+</zookeeper>
+```
+
+Also, we need to set macros for identifying each shard and replica; they are used on table creation:
+``` xml
+<macros>
+    <shard>01</shard>
+    <replica>01</replica>
+</macros>
+```
+
+If there are no replicas at the moment of replicated table creation, a new first replica is instantiated. If there are already live replicas, the new replica clones the data from the existing ones. You have the option to create all replicated tables first and then insert data into them. Another option is to create some replicas and add the others after or during data insertion.
+
+``` sql
+CREATE TABLE tutorial.hits_replica (...)
+ENGINE = ReplicatedMergeTree(
+    '/clickhouse_perftest/tables/{shard}/hits',
+    '{replica}'
+)
+...
+```
+
+Here we use the [ReplicatedMergeTree](../operations/table_engines/replication.md) table engine. In the parameters we specify the ZooKeeper path containing the shard and replica identifiers.
+
+``` sql
+INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
+```
+Replication operates in multi-master mode. Data can be loaded into any replica, and it is synced with the other instances automatically. Replication is asynchronous, so at a given moment of time not all replicas may contain recently inserted data. At least one replica should be up to allow data insertion. The others will sync up the data and repair consistency once they become active again. Please notice that this approach allows for a low possibility of losing just-appended data.
diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md
index 394313fccdb..198e5f5c094 100644
--- a/docs/en/interfaces/cli.md
+++ b/docs/en/interfaces/cli.md
@@ -1,8 +1,8 @@
 # Command-line Client

-ClickHouse provides the native command line client `clickhouse-client`. The client supports command-line options and configuration files. For more information, see "[Configuring](#interfaces_cli_configuration)".
+ClickHouse provides a native command-line client: `clickhouse-client`. The client supports command-line options and configuration files. For more information, see [Configuring](#interfaces_cli_configuration).

-[Install](../getting_started/index.md) it by the `clickhouse-client` package and run it by the command `clickhouse-client`.
+[Install](../getting_started/index.md) it from the `clickhouse-client` package and run it with the command `clickhouse-client`.
```bash
$ clickhouse-client
@@ -13,7 +13,7 @@ Connected to ClickHouse server version 19.17.1 revision 54428.
 :)
 ```

-Different versions of client and server are compatible, but some features may be disabled for older clients. We don't recommend using different versions of the client and the server app. When you try to use the client of the older version, then the server, `clickhouse-client` displays the message:
+Different client and server versions are compatible with one another, but some features may not be available in older clients. We recommend using the same version of the client as the server app. When you try to use a client of an older version than the server, `clickhouse-client` displays the message:

 ```
 ClickHouse client version is older than ClickHouse server. It may lack support for new features.
@@ -45,9 +45,9 @@ Similarly, to process a large number of queries, you can run 'clickhouse-client'

 In interactive mode, you get a command line where you can enter queries.

-If 'multiline' is not specified (the default):To run the query, press Enter. The semicolon is not necessary at the end of the query. To enter a multiline query, enter a backslash `\` before the line feed. After you press Enter, you will be asked to enter the next line of the query.
+If 'multiline' is not specified (the default): To run the query, press Enter. The semicolon is not necessary at the end of the query. To enter a multiline query, enter a backslash `\` before the line feed. After you press Enter, you will be asked to enter the next line of the query.

-If multiline is specified:To run a query, end it with a semicolon and press Enter. If the semicolon was omitted at the end of the entered line, you will be asked to enter the next line of the query.
+If multiline is specified: To run a query, end it with a semicolon and press Enter. If the semicolon was omitted at the end of the entered line, you will be asked to enter the next line of the query.

 Only a single query is run, so everything after the semicolon is ignored.
diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md
index ee05a1cdb64..25a146f78b3 100644
--- a/docs/en/interfaces/http.md
+++ b/docs/en/interfaces/http.md
@@ -28,8 +28,12 @@ $ wget -O- -q 'http://localhost:8123/?query=SELECT 1'
 $ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123
 HTTP/1.0 200 OK
+Date: Wed, 27 Nov 2019 10:30:18 GMT
 Connection: Close
-Date: Fri, 16 Nov 2012 19:21:50 GMT
+Content-Type: text/tab-separated-values; charset=UTF-8
+X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
+X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
+X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}

 1
 ```
diff --git a/docs/en/introduction/ya_metrika_task.md b/docs/en/introduction/history.md
similarity index 98%
rename from docs/en/introduction/ya_metrika_task.md
rename to docs/en/introduction/history.md
index 41b33eff581..e8f373880f1 100644
--- a/docs/en/introduction/ya_metrika_task.md
+++ b/docs/en/introduction/history.md
@@ -1,4 +1,4 @@
-# Yandex.Metrica Use Case
+# ClickHouse History

 ClickHouse was originally developed to power [Yandex.Metrica](https://metrica.yandex.com/), [the second largest web analytics platform in the world](http://w3techs.com/technologies/overview/traffic_analysis/all), and continues to be the core component of this system.
With more than 13 trillion records in the database and more than 20 billion events daily, ClickHouse allows generating custom reports on the fly directly from non-aggregated data. This article briefly covers the goals of ClickHouse in the early stages of its development.
@@ -47,4 +47,4 @@ OLAPServer worked well for non-aggregated data, but it had many restrictions tha

 To remove the limitations of OLAPServer and solve the problem of working with non-aggregated data for all reports, we developed the ClickHouse DBMS.

-[Original article](https://clickhouse.yandex/docs/en/introduction/ya_metrika_task/)
+[Original article](https://clickhouse.yandex/docs/en/introduction/history/)
diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md
index eaa0ffdd406..331c3c0144f 100644
--- a/docs/en/operations/monitoring.md
+++ b/docs/en/operations/monitoring.md
@@ -34,4 +34,4 @@ You can configure ClickHouse to export metrics to [Graphite](https://github.com/

 Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/`. If the server is available, it responds with `200 OK`.

-To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas-delay`. A request to `/replicas-delay` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns information about the gap.
+To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns information about the gap.
diff --git a/docs/en/operations/server_settings/settings.md b/docs/en/operations/server_settings/settings.md
index 4158cad3440..c76637cc927 100644
--- a/docs/en/operations/server_settings/settings.md
+++ b/docs/en/operations/server_settings/settings.md
@@ -578,6 +578,32 @@ If the table doesn't exist, ClickHouse will create it. If the structure of the q
 ```

+## query_thread_log {#server_settings-query-thread-log}
+
+Settings for logging the threads of queries received with the [log_query_threads=1](../settings/settings.md#settings-log-query-threads) setting.
+
+Queries are logged in the [system.query_thread_log](../system_tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
+
+Use the following parameters to configure logging:
+
+- `database` – Name of the database.
+- `table` – Name of the system table the queries will be logged in.
+- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table.
+- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+
+If the table doesn't exist, ClickHouse will create it. If the structure of the query thread log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically.
+
+**Example**
+
+```xml
+<query_thread_log>
+    <database>system</database>
+    <table>query_thread_log</table>
+    <partition_by>toMonday(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</query_thread_log>
+```
+
 ## trace_log {#server_settings-trace_log}

 Settings for the [trace_log](../system_tables.md#system_tables-trace_log) system table operation.
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 0ad80cfed2f..ab3f5b95a56 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -130,6 +130,17 @@ Possible values:

 Default value: 0.

+## max_http_get_redirects {#setting-max_http_get_redirects}
+
+Limits the maximum number of HTTP GET redirect hops for [URL](../table_engines/url.md)-engine tables. The setting applies to both types of tables: those created by a [CREATE TABLE](../../query_language/create/#create-table-query) query and those created by the [url](../../query_language/table_functions/url.md) table function.
+
+Possible values:
+
+- Positive integer number of hops.
+- 0 — Unlimited number of hops.
+
+Default value: 0.
+
 ## input_format_allow_errors_num {#settings-input_format_allow_errors_num}

 Sets the maximum number of acceptable errors when reading from text formats (CSV, TSV, etc.).
@@ -513,6 +524,16 @@ Queries sent to ClickHouse with this setup are logged according to the rules in

     log_queries=1

+## log_query_threads {#settings-log-query-threads}
+
+Enables logging of query threads.
+
+Threads of queries run by ClickHouse with this setting enabled are logged according to the rules in the [query_thread_log](../server_settings/settings.md#server_settings-query-thread-log) server configuration parameter.
+
+**Example**:
+
+    log_query_threads=1
+
 ## max_insert_block_size {#settings-max_insert_block_size}

 The size of blocks to form for insertion into a table.
@@ -595,6 +616,13 @@ Timeouts in seconds on the socket used for communicating with the client.

 Default value: 10, 300, 300.

+## cancel_http_readonly_queries_on_client_close
+
+Cancels HTTP readonly queries (e.g. SELECT) when a client closes the connection without waiting for the response.
+
+Default value: 0.
+
+
 ## poll_interval

 Lock in a wait loop for the specified number of seconds.
@@ -979,6 +1007,41 @@ Error count of each replica is capped at this value, preventing a single replica

 - [Table engine Distributed](../../operations/table_engines/distributed.md)
 - [`distributed_replica_error_half_life`](#settings-distributed_replica_error_half_life)

+
+## distributed_directory_monitor_sleep_time_ms {#distributed_directory_monitor_sleep_time_ms}
+
+Base interval of data sending by the [Distributed](../table_engines/distributed.md) table engine. The actual interval grows exponentially in case of errors.
+
+Possible values:
+
+- Positive integer number of milliseconds.
+
+Default value: 100 milliseconds.
+
+
+## distributed_directory_monitor_max_sleep_time_ms {#distributed_directory_monitor_max_sleep_time_ms}
+
+Maximum interval of data sending by the [Distributed](../table_engines/distributed.md) table engine. Limits exponential growth of the interval set in the [distributed_directory_monitor_sleep_time_ms](#distributed_directory_monitor_sleep_time_ms) setting.
+
+Possible values:
+
+- Positive integer number of milliseconds.
+
+Default value: 30000 milliseconds (30 seconds).
+
+## distributed_directory_monitor_batch_inserts {#distributed_directory_monitor_batch_inserts}
+
+Enables/disables sending of inserted data in batches.
+
+When batch sending is enabled, the [Distributed](../table_engines/distributed.md) table engine tries to send multiple files of inserted data in one operation instead of sending them separately.
Batch sending improves cluster performance through better utilization of server and network resources.
+
+Possible values:
+
+- 1 — Enabled.
+- 0 — Disabled.
+
+Default value: 0.
+
 ## os_thread_priority {#setting-os_thread_priority}

 Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
@@ -994,6 +1057,64 @@ Lower values mean higher priority. Threads with low `nice` priority values are e

 Default value: 0.

+
+## query_profiler_real_time_period_ns {#query_profiler_real_time_period_ns}
+
+Sets the period for a real clock timer of the query profiler. The real clock timer counts wall-clock time.
+
+Possible values:
+
+- Positive integer number, in nanoseconds.
+
+    Recommended values:
+
+    - 10000000 (100 times a second) nanoseconds and less for single queries.
+    - 1000000000 (once a second) for cluster-wide profiling.
+
+- 0 for turning off the timer.
+
+Type: [UInt64](../../data_types/int_uint.md).
+
+Default value: 1000000000 nanoseconds (once a second).
+
+**See Also**
+
+- [system.trace_log](../system_tables.md#system_tables-trace_log)
+
+## query_profiler_cpu_time_period_ns {#query_profiler_cpu_time_period_ns}
+
+Sets the period for a CPU clock timer of the query profiler. This timer counts only CPU time.
+
+Possible values:
+
+- Positive integer number of nanoseconds.
+
+    Recommended values:
+
+    - 10000000 (100 times a second) nanoseconds and more for single queries.
+    - 1000000000 (once a second) for cluster-wide profiling.
+
+- 0 for turning off the timer.
+
+Type: [UInt64](../../data_types/int_uint.md).
+
+Default value: 1000000000 nanoseconds.
+
+**See Also**
+
+- [system.trace_log](../system_tables.md#system_tables-trace_log)
+
+## allow_introspection_functions {#settings-allow_introspection_functions}
+
+Enables or disables [introspection functions](../../query_language/functions/introspection.md) for query profiling.
+
+Possible values:
+
+- 1 — Introspection functions enabled.
+- 0 — Introspection functions disabled.
+
+Default value: 0.
+
 ## input_format_parallel_parsing

 - Type: bool
diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md
index e85d5225763..77964c7377f 100644
--- a/docs/en/operations/system_tables.md
+++ b/docs/en/operations/system_tables.md
@@ -206,7 +206,7 @@ Columns:

 ## system.graphite_retentions

-Contains information about parameters [graphite_rollup](server_settings/settings.md#server_settings-graphite_rollup) which are used in tables with [*GraphiteMergeTree](table_engines/graphitemergetree.md) engines.
+Contains information about parameters [graphite_rollup](server_settings/settings.md#server_settings-graphite_rollup) which are used in tables with [\*GraphiteMergeTree](table_engines/graphitemergetree.md) engines.

 Columns:
@@ -418,8 +418,8 @@ Columns:
 - `'QueryFinish' = 2` — Successful end of query execution.
 - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
 - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
-- `event_date` (Date) — Event date.
-- `event_time` (DateTime) — Event time.
+- `event_date` (Date) — Query starting date.
+- `event_time` (DateTime) — Query starting time.
 - `query_start_time` (DateTime) — Start time of query execution.
 - `query_duration_ms` (UInt64) — Duration of query execution.
 - `read_rows` (UInt64) — Number of read rows.
@@ -437,36 +437,32 @@ Columns: - 0 — Query was initiated by another query for distributed query execution. - `user` (String) — Name of the user who initiated the current query. - `query_id` (String) — ID of the query. -- `address` (FixedString(16)) — IP address the query was initiated from. -- `port` (UInt16) — The server port that was used to receive the query. -- `initial_user` (String) — Name of the user who ran the parent query (for distributed query execution). -- `initial_query_id` (String) — ID of the parent query. -- `initial_address` (FixedString(16)) — IP address that the parent query was launched from. -- `initial_port` (UInt16) — The server port that was used to receive the parent query from the client. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. - `interface` (UInt8) — Interface that the query was initiated from. Possible values: - 1 — TCP. - 2 — HTTP. -- `os_user` (String) — User's OS. -- `client_hostname` (String) — Server name that the [clickhouse-client](../interfaces/cli.md) is connected to. -- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) name. -- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md). -- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md). -- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md). -- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) version. +- `os_user` (String) — OS's username who runs [clickhouse-client](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run. +- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name. +- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client. +- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. +- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. +- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version. - `http_method` (UInt8) — HTTP method that initiated the query. Possible values: - 0 — The query was launched from the TCP interface. - 1 — `GET` method was used. - 2 — `POST` method was used. - `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request. -- `quota_key` (String) — The quota key specified in the [quotas](quotas.md) setting. +- `quota_key` (String) — The "quota key" specified in the [quotas](quotas.md) setting (see `keyed`). - `revision` (UInt32) — ClickHouse revision. - `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. -- `ProfileEvents.Names` (Array(String)) — Counters that measure the following metrics: - - Time spent on reading and writing over the network. 
- - Time spent on reading and writing to a disk. - - Number of network errors. - - Time spent on waiting when the network bandwidth is limited. -- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [system.events](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column. - `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1. - `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column. @@ -485,12 +481,77 @@ When the table is deleted manually, it will be automatically created on the fly. You can specify an arbitrary partitioning key for the `system.query_log` table in the [query_log](server_settings/settings.md#server_settings-query-log) server setting (see the `partition_by` parameter). +## system.query_thread_log {#system_tables-query-thread-log} + +The table contains information about each query execution thread. + +ClickHouse creates this table only if the [query_thread_log](server_settings/settings.md#server_settings-query-thread-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in. + +To enable query logging, set the [log_query_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section. + +Columns: + +- `event_date` (Date) — the date when the thread has finished execution of the query. +- `event_time` (DateTime) — the date and time when the thread has finished execution of the query. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0. +- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0. +- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in context of this thread. +- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in context of this thread. +- `thread_name` (String) — Name of the thread. +- `thread_number` (UInt32) — Internal thread ID. +- `os_thread_id` (Int32) — OS thread ID. +- `master_thread_number` (UInt32) — Internal ID of initial thread. +- `master_os_thread_id` (Int32) — OS initial ID of initial thread. +- `query` (String) — Query string. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). 
+- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: + - 1 — TCP. + - 2 — HTTP. +- `os_user` (String) — OS's username who runs [clickhouse-client](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run. +- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name. +- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client. +- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. +- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. +- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version. +- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: + - 0 — The query was launched from the TCP interface. + - 1 — `GET` method was used. + - 2 — `POST` method was used. +- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request. +- `quota_key` (String) — The "quota key" specified in the [quotas](quotas.md) setting (see `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column. + +By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query_thread_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query. + +When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted. + +!!! note + The storage period for logs is unlimited. Logs aren't automatically deleted from the table. You need to organize the removal of outdated logs yourself. + +You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query_thread_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `partition_by` parameter). ## system.trace_log {#system_tables-trace_log} Contains stack traces collected by the sampling query profiler. -ClickHouse creates this table when the [trace_log](server_settings/settings.md#server_settings-trace_log) server configuration section is set. Also the `query_profiler_real_time_period_ns` and `query_profiler_cpu_time_period_ns` settings should be set. +ClickHouse creates this table when the [trace_log](server_settings/settings.md#server_settings-trace_log) server configuration section is set. 
Also the [query_profiler_real_time_period_ns](settings/settings.md#query_profiler_real_time_period_ns) and [query_profiler_cpu_time_period_ns](settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.
To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions.
diff --git a/docs/en/operations/table_engines/distributed.md b/docs/en/operations/table_engines/distributed.md
index 38d085da568..a22fd43b34f 100644
--- a/docs/en/operations/table_engines/distributed.md
+++ b/docs/en/operations/table_engines/distributed.md
@@ -87,12 +87,9 @@ The Distributed engine requires writing clusters to the config file. Clusters fr
There are two methods for writing data to a cluster:
-First, you can define which servers to write which data to, and perform the write directly on each shard. In other words, perform INSERT in the tables that the distributed table "looks at".
-This is the most flexible solution – you can use any sharding scheme, which could be non-trivial due to the requirements of the subject area.
-This is also the most optimal solution, since data can be written to different shards completely independently.
+First, you can define which servers to write which data to, and perform the write directly on each shard. In other words, perform INSERT in the tables that the distributed table "looks at". This is the most flexible solution – you can use any sharding scheme, which could be non-trivial due to the requirements of the subject area. This is also the optimal solution, since data can be written to different shards completely independently.
-Second, you can perform INSERT in a Distributed table. In this case, the table will distribute the inserted data across servers itself.
-In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it doesn't have any meaning in this case.
+Second, you can perform INSERT in a Distributed table. In this case, the table will distribute the inserted data across servers itself. In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it doesn't have any meaning in this case.
Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in the amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19.
@@ -115,7 +112,7 @@ You should be concerned about the sharding scheme in the following cases:
- Queries are used that require joining data (IN or JOIN) by a specific key. If data is sharded by this key, you can use local IN or JOIN instead of GLOBAL IN or GLOBAL JOIN, which is much more efficient.
- A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we've done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into "layers", where a layer may consist of multiple shards.
Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries.
-Data is written asynchronously. For an INSERT to a Distributed table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: /var/lib/clickhouse/data/database/table/.
+Data is written asynchronously. When inserted into the table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. The period of data sending is managed by the [distributed_directory_monitor_sleep_time_ms](../settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed_directory_monitor_max_sleep_time_ms](../settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed_directory_monitor_batch_inserts](../settings/settings.md#distributed_directory_monitor_batch_inserts) setting. This setting improves cluster performance through better utilization of local server and network resources. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`.
If the server ceased to exist or had a rough restart (for example, after a device failure) after an INSERT to a Distributed table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the 'broken' subdirectory and no longer used.
diff --git a/docs/en/operations/table_engines/url.md b/docs/en/operations/table_engines/url.md
index 6521604171c..cb7b57b35c3 100644
--- a/docs/en/operations/table_engines/url.md
+++ b/docs/en/operations/table_engines/url.md
@@ -17,6 +17,8 @@ additional headers for getting a response from the server.
respectively. For processing `POST` requests, the remote server must support [Chunked transfer encoding](https://en.wikipedia.org/wiki/Chunked_transfer_encoding).
+You can limit the maximum number of HTTP GET redirect hops with the [max_http_get_redirects](../settings/settings.md#setting-max_http_get_redirects) setting.
+
**Example:**
**1.** Create a `url_engine_table` table on the server:
diff --git a/docs/en/query_language/create.md b/docs/en/query_language/create.md
index 480cc788d11..3757a154166 100644
--- a/docs/en/query_language/create.md
+++ b/docs/en/query_language/create.md
@@ -196,15 +196,16 @@ High compression levels are useful for asymmetric scenarios, like compress once,
ClickHouse supports temporary tables which have the following characteristics:
- Temporary tables disappear when the session ends, including if the connection is lost.
-- A temporary table use the Memory engine only.
+- A temporary table uses the Memory engine only.
- The DB can't be specified for a temporary table. It is created outside of databases.
+- It is impossible to create a temporary table with a distributed DDL query on all cluster servers (by using `ON CLUSTER`): such a table exists only in the current session.
- If a temporary table has the same name as another one and a query specifies the table name without specifying the DB, the temporary table will be used.
- For distributed query processing, temporary tables used in a query are passed to remote servers.
To create a temporary table, use the following syntax:
```sql
-CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name [ON CLUSTER cluster]
+CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
@@ -214,6 +215,8 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name [ON CLUSTER cluster]
In most cases, temporary tables are not created manually, but when using external data for a query, or for distributed `(GLOBAL) IN`. For more information, see the appropriate sections
+It's possible to use tables with [ENGINE = Memory](../operations/table_engines/memory.md) instead of temporary tables.
+
## Distributed DDL queries (ON CLUSTER clause)
The `CREATE`, `DROP`, `ALTER`, and `RENAME` queries support distributed execution on a cluster.
diff --git a/docs/en/query_language/functions/array_functions.md b/docs/en/query_language/functions/array_functions.md
index 2454df4042e..ab194deff15 100644
--- a/docs/en/query_language/functions/array_functions.md
+++ b/docs/en/query_language/functions/array_functions.md
@@ -682,7 +682,7 @@ SELECT arrayDifference([0, 10000000000000000000])
## arrayDistinct(arr) {#array_functions-arraydistinct}
-Takes an array, returns an array containing the distinct elements. 
+Takes an array, returns an array containing the distinct elements.
Example:
@@ -698,7 +698,7 @@ SELECT arrayDistinct([1, 2, 2, 3, 1])
## arrayEnumerateDense(arr) {#array_functions-arrayenumeratedense}
-Returns an array of the same size as the source array, indicating where each element first appears in the source array. 
+Returns an array of the same size as the source array, indicating where each element first appears in the source array.
Example:
@@ -772,22 +772,6 @@ SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
└─────────────────────────────────────────────────────────────┘
```
-## arrayFlatten(arr) {#array_functions-arrayflatten}
-
-The `arrayFlatten` (or `flatten` alias) method will collapse the elements of an array to create a single array.
-
-Example:
-
-```sql
-SELECT arrayFlatten([[1, 2, 3], [4, 5]])
-```
-
-```text
-┌─arrayFlatten([[1, 2, 3], [4, 5]])─┐
-│ [1,2,3,4,5] │
-└───────────────────────────────────┘
-```
-
## arrayReverse(arr) {#array_functions-arrayreverse}
Returns an array of the same size as the original array containing the elements in reverse order.
@@ -808,22 +792,78 @@ SELECT arrayReverse([1, 2, 3])
Synonym for ["arrayReverse"](#array_functions-arrayreverse)
-[Original article](https://clickhouse.yandex/docs/en/query_language/functions/array_functions/)
+## arrayFlatten {#arrayflatten}
-## arrayCompact(arr) {#array_functions-arraycompact}
+Converts an array of arrays to a flat array.
-Takes an array, returns an array with consecutive duplicate elements removed.
+Function:
-Example:
+- Applies to any depth of nested arrays, but all the elements should lie at the same level.
+
+    For example, the `[[[1]], [[2], [3]]]` array can be flattened, but the `[[1], [[2], [3]]]` array can't be flattened.
+
+- Does not change arrays that are already flat.
+
+The flattened array contains all the elements from all source arrays.
+
+**Syntax**
```sql
-SELECT arrayCompact([1, 2, 2, 3, 2, 3, 3])
+flatten(array_of_arrays)
```
+Alias: `flatten`.
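+
+The function leaves already-flat arrays unchanged (see the note above), so a quick sanity check is possible. A minimal sketch, assuming the behavior described above:
+
+```sql
+SELECT flatten([1, 2, 3])
+```
+
+The expected result is `[1,2,3]`, i.e. the input array unchanged.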
+
+
+**Parameters**
+
+- `array_of_arrays` — [Array](../../data_types/array.md) of arrays. For example, `[[1,2,3], [4,5]]`.
+
+**Examples**
+
+```sql
+SELECT flatten([[[1]], [[2], [3]]])
+```
+```text
+┌─flatten(array(array([1]), array([2], [3])))─┐
+│ [1,2,3] │
+└─────────────────────────────────────────────┘
+```
+
+## arrayCompact {#arraycompact}
+
+Removes consecutive duplicate elements from an array. The order of result values is determined by the order in the source array.
+
+**Syntax**
+
+```sql
+arrayCompact(arr)
+```
+
+**Parameters**
+
+`arr` — The [array](../../data_types/array.md) to inspect.
+
+**Returned value**
+
+The array without consecutive duplicates.
+
+Type: `Array`.
+
+**Example**
+
+Query:
+
+```sql
+SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])
+```
+
+Result:
+
```text
-┌─arrayCompact([1, 2, 2, 3, 2, 3, 3])──┐
-│ [1,2,3,2,3] │
-└──────────────────────────────────────┘
+┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐
+│ [1,nan,nan,2,3] │
+└────────────────────────────────────────────┘
```
-## 
\ No newline at end of file
+[Original article](https://clickhouse.yandex/docs/en/query_language/functions/array_functions/)
diff --git a/docs/en/query_language/functions/hash_functions.md b/docs/en/query_language/functions/hash_functions.md
index 2d21d2290ad..d98c56cd584 100644
--- a/docs/en/query_language/functions/hash_functions.md
+++ b/docs/en/query_language/functions/hash_functions.md
@@ -179,6 +179,8 @@ SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0
Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) from a string. This hash function is neither fast nor having a good quality. The only reason to use it is when this algorithm is already used in another system and you have to calculate exactly the same result.
+**Syntax**
+
```sql
SELECT javaHash('');
```
@@ -187,8 +189,6 @@ SELECT javaHash('');
A `Int32` data type hash value.
-Type: `javaHash`.
-
**Example**
Query:
@@ -205,15 +205,40 @@ Result:
└───────────────────────────┘
```
-## javaHashUTF16LE
+## javaHashUTF16LE {#javahashutf16le}
-The same as [JavaHash](#hash_functions-javahash), but for UTF-16LE code points. Works under the assumption that the string contains a set of bytes representing a UTF-16LE encoded text. If this assumption is not met, it returns some result (It only throws an exception in partial cases).
+Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) from a string, assuming it contains bytes representing a string in UTF-16LE encoding.
+**Syntax**
+
+```sql
+javaHashUTF16LE(stringUtf16le)
+```
+
+**Parameters**
+
+- `stringUtf16le` — a string in UTF-16LE encoding.
+
+**Returned value**
+
+An `Int32` data type hash value.
**Example**
+A correct query with a UTF-16LE encoded string.
+
+Query:
+
```sql
-SELECT javaHashUTF16LE(convertCharset('Hello, world!', 'utf-8', 'utf-16le'))
+SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))
+```
+
+Result:
+
+```text
+┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐
+│ 3556498 │
+└──────────────────────────────────────────────────────────────┘
```
## hiveHash {#hash_functions-hivehash}
diff --git a/docs/en/query_language/functions/introspection.md b/docs/en/query_language/functions/introspection.md
new file mode 100644
index 00000000000..520c89feaeb
--- /dev/null
+++ b/docs/en/query_language/functions/introspection.md
@@ -0,0 +1,298 @@
+# Introspection Functions
+
+You can use functions described in this chapter to introspect [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) and [DWARF](https://en.wikipedia.org/wiki/DWARF) for query profiling.
+
+!!! warning "Warning"
+    These functions are slow and may have security implications.
+
+For proper operation of introspection functions:
+
+- Install the `clickhouse-common-static-dbg` package.
+- Set the [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting to 1.
+
+    For security reasons introspection functions are disabled by default.
+
+ClickHouse saves profiler reports to the [trace_log](../../operations/system_tables.md#system_tables-trace_log) system table. Make sure the table and profiler are configured properly.
+
+## addressToLine {#addresstoline}
+
+Converts a virtual memory address inside the ClickHouse server process to the filename and the line number in the ClickHouse source code.
+
+If you use official ClickHouse packages, you need to install the `clickhouse-common-static-dbg` package.
+
+**Syntax**
+
+```sql
+addressToLine(address_of_binary_instruction)
+```
+
+**Parameters**
+
+- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of an instruction in a running process.
+
+**Returned value**
+
+- Source code filename and the line number in this file, delimited by a colon.
+
+    For example, `/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199`, where `199` is a line number.
+
+- Name of a binary, if the function couldn't find the debug information.
+- Empty string, if the address is not valid.
+
+Type: [String](../../data_types/string.md).
+
+**Example**
+
+Enabling introspection functions:
+
+```sql
+SET allow_introspection_functions=1
+```
+
+Selecting the first row from the `trace_log` system table:
+
+```sql
+SELECT * FROM system.trace_log LIMIT 1 \G
+```
+```text
+Row 1:
+──────
+event_date: 2019-11-19
+event_time: 2019-11-19 18:57:23
+revision: 54429
+timer_type: Real
+thread_number: 48
+query_id: 421b6855-1858-45a5-8f37-f383409d6d72
+trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439]
+```
+
+The `trace` field contains the stack trace at the moment of sampling.
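+
+If `system.trace_log` turns out to be empty, the sampling profiler itself may not be enabled. A minimal sketch of switching it on for the current session, assuming a 10 ms sampling period is acceptable (see the [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) setting):
+
+```sql
+-- Collect a stack trace every 10 ms (10000000 ns) of wall-clock time.
+SET query_profiler_real_time_period_ns = 10000000
+```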
+
+Getting the source code filename and the line number for a single address:
+
+```sql
+SELECT addressToLine(94784076370703) \G
+```
+```text
+Row 1:
+──────
+addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199
+```
+
+Applying the function to the whole stack trace:
+
+```sql
+SELECT
+    arrayStringConcat(arrayMap(x -> addressToLine(x), trace), '\n') AS trace_source_code_lines
+FROM system.trace_log
+LIMIT 1
+\G
+```
+
+The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows processing each individual element of the `trace` array with the `addressToLine` function. The result of this processing is shown in the `trace_source_code_lines` column of the output.
+
+```text
+Row 1:
+──────
+trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so
+/usr/lib/debug/usr/bin/clickhouse
+/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199
+/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.h:155
+/usr/include/c++/9/bits/atomic_base.h:551
+/usr/lib/debug/usr/bin/clickhouse
+/lib/x86_64-linux-gnu/libpthread-2.27.so
+/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97
+```
+
+## addressToSymbol {#addresstosymbol}
+
+Converts a virtual memory address inside the ClickHouse server process to the symbol from ClickHouse object files.
+
+
+**Syntax**
+
+```sql
+addressToSymbol(address_of_binary_instruction)
+```
+
+**Parameters**
+
+- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of an instruction in a running process.
+
+**Returned value**
+
+- Symbol from ClickHouse object files.
+- Empty string, if the address is not valid.
+
+Type: [String](../../data_types/string.md).
+
+**Example**
+
+Enabling introspection functions:
+
+```sql
+SET allow_introspection_functions=1
+```
+
+Selecting the first row from the `trace_log` system table:
+
+```sql
+SELECT * FROM system.trace_log LIMIT 1 \G
+```
+```text
+Row 1:
+──────
+event_date: 2019-11-20
+event_time: 2019-11-20 16:57:59
+revision: 54429
+timer_type: Real
+thread_number: 48
+query_id: 724028bf-f550-45aa-910d-2af6212b94ac
+trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583]
+```
+
+The `trace` field contains the stack trace at the moment of sampling.
+
+Getting a symbol for a single address:
+
+```sql
+SELECT addressToSymbol(94138803686098) \G
+```
+```text
+Row 1:
+──────
+addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE
+```
+
+Applying the function to the whole stack trace:
+
+```sql
+SELECT
+    arrayStringConcat(arrayMap(x -> addressToSymbol(x), trace), '\n') AS trace_symbols
+FROM system.trace_log
+LIMIT 1
+\G
+```
+
+The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows processing each individual element of the `trace` array with the `addressToSymbol` function. The result of this processing is shown in the `trace_symbols` column of the output.
+
+
+```text
+Row 1:
+──────
+trace_symbols: _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE
+_ZNK2DB10Aggregator21executeWithoutKeyImplERPcmPNS0_28AggregateFunctionInstructionEPNS_5ArenaE
+_ZN2DB10Aggregator14executeOnBlockESt6vectorIN3COWINS_7IColumnEE13immutable_ptrIS3_EESaIS6_EEmRNS_22AggregatedDataVariantsERS1_IPKS3_SaISC_EERS1_ISE_SaISE_EERb
+_ZN2DB10Aggregator14executeOnBlockERKNS_5BlockERNS_22AggregatedDataVariantsERSt6vectorIPKNS_7IColumnESaIS9_EERS6_ISB_SaISB_EERb
+_ZN2DB10Aggregator7executeERKSt10shared_ptrINS_17IBlockInputStreamEERNS_22AggregatedDataVariantsE
+_ZN2DB27AggregatingBlockInputStream8readImplEv
+_ZN2DB17IBlockInputStream4readEv
+_ZN2DB26ExpressionBlockInputStream8readImplEv
+_ZN2DB17IBlockInputStream4readEv
+_ZN2DB26ExpressionBlockInputStream8readImplEv
+_ZN2DB17IBlockInputStream4readEv
+_ZN2DB28AsynchronousBlockInputStream9calculateEv
+_ZNSt17_Function_handlerIFvvEZN2DB28AsynchronousBlockInputStream4nextEvEUlvE_E9_M_invokeERKSt9_Any_data
+_ZN14ThreadPoolImplI20ThreadFromGlobalPoolE6workerESt14_List_iteratorIS0_E
+_ZZN20ThreadFromGlobalPoolC4IZN14ThreadPoolImplIS_E12scheduleImplIvEET_St8functionIFvvEEiSt8optionalImEEUlvE1_JEEEOS4_DpOT0_ENKUlvE_clEv
+_ZN14ThreadPoolImplISt6threadE6workerESt14_List_iteratorIS0_E
+execute_native_thread_routine
+start_thread
+clone
+```
+
+## demangle {#demangle}
+
+Converts a symbol that you can get using the [addressToSymbol](#addresstosymbol) function to the C++ function name.
+
+
+**Syntax**
+
+```sql
+demangle(symbol)
+```
+
+**Parameters**
+
+- `symbol` ([String](../../data_types/string.md)) — Symbol from an object file.
+
+**Returned value**
+
+- Name of the C++ function.
+- Empty string if a symbol is not valid.
+
+Type: [String](../../data_types/string.md).
+
+**Example**
+
+Enabling introspection functions:
+
+```sql
+SET allow_introspection_functions=1
+```
+
+Selecting the first row from the `trace_log` system table:
+
+```sql
+SELECT * FROM system.trace_log LIMIT 1 \G
+```
+```text
+Row 1:
+──────
+event_date: 2019-11-20
+event_time: 2019-11-20 16:57:59
+revision: 54429
+timer_type: Real
+thread_number: 48
+query_id: 724028bf-f550-45aa-910d-2af6212b94ac
+trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583]
+```
+
+The `trace` field contains the stack trace at the moment of sampling.
+
+Getting a function name for a single address:
+
+```sql
+SELECT demangle(addressToSymbol(94138803686098)) \G
+```
+```text
+Row 1:
+──────
+demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const
+```
+
+Applying the function to the whole stack trace:
+
+```sql
+SELECT
+    arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_functions
+FROM system.trace_log
+LIMIT 1
+\G
+```
+
+The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows processing each individual element of the `trace` array with the `demangle` function. The result of this processing is shown in the `trace_functions` column of the output.
+
+```text
+Row 1:
+──────
+trace_functions: DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const
+DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) const
+DB::Aggregator::executeOnBlock(std::vector::immutable_ptr, std::allocator::immutable_ptr > >, unsigned long, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&)
+DB::Aggregator::executeOnBlock(DB::Block const&, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&)
+DB::Aggregator::execute(std::shared_ptr const&, DB::AggregatedDataVariants&)
+DB::AggregatingBlockInputStream::readImpl()
+DB::IBlockInputStream::read()
+DB::ExpressionBlockInputStream::readImpl()
+DB::IBlockInputStream::read()
+DB::ExpressionBlockInputStream::readImpl()
+DB::IBlockInputStream::read()
+DB::AsynchronousBlockInputStream::calculate()
+std::_Function_handler::_M_invoke(std::_Any_data const&)
+ThreadPoolImpl::worker(std::_List_iterator)
+ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const
+ThreadPoolImpl::worker(std::_List_iterator)
+execute_native_thread_routine
+start_thread
+clone
+```
diff --git a/docs/en/query_language/functions/json_functions.md b/docs/en/query_language/functions/json_functions.md
index 6ab942bd012..eeb41870112 100644
--- a/docs/en/query_language/functions/json_functions.md
+++ b/docs/en/query_language/functions/json_functions.md
@@ -206,4 +206,16 @@ Example:
SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]'
```
+## JSONExtractArrayRaw(json[, indices_or_keys]...)
+
+Returns an array with elements of the JSON array, each represented as an unparsed string.
+
+If the part does not exist or isn't an array, an empty array will be returned.
+
+Example:
+
+```sql
+SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']
+```
+
[Original article](https://clickhouse.yandex/docs/en/query_language/functions/json_functions/)
diff --git a/docs/en/query_language/functions/other_functions.md b/docs/en/query_language/functions/other_functions.md
index f6139741849..394cd78c0f3 100644
--- a/docs/en/query_language/functions/other_functions.md
+++ b/docs/en/query_language/functions/other_functions.md
@@ -4,8 +4,39 @@
Returns a string with the name of the host that this function was performed on. For distributed processing, this is the name of the remote server host, if the function is performed on a remote server.
-## FQDN(), fullHostName()
-Returns the Fully qualified domain name aka [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name).
+## FQDN {#fqdn}
+
+Returns the fully qualified domain name.
+
+**Syntax**
+
+```sql
+fqdn();
+```
+
+This function is case-insensitive.
+
+**Returned value**
+
+- String with the fully qualified domain name.
+
+Type: `String`.
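+
+For comparison, `hostName()` (described above) may return only the short host name. A minimal sketch of inspecting both on a given server; the output depends on the server's network configuration:
+
+```sql
+SELECT hostName(), FQDN();
+```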
+
+**Example**
+
+Query:
+
+```sql
+SELECT FQDN();
+```
+
+Result:
+
+```text
+┌─FQDN()──────────────────────────┐
+│ clickhouse.ru-central1.internal │
+└─────────────────────────────────┘
+```
## basename
diff --git a/docs/en/query_language/functions/string_functions.md b/docs/en/query_language/functions/string_functions.md
index a45f41a4528..33e5700f355 100644
--- a/docs/en/query_language/functions/string_functions.md
+++ b/docs/en/query_language/functions/string_functions.md
@@ -217,17 +217,119 @@ Result:
└───────────────────────────────────┘
```
-## trimLeft(s)
+## trimLeft {#trimleft}
-Returns a string that removes the whitespace characters on left side.
+Removes all consecutive occurrences of common whitespace (ASCII character 32) from the beginning of a string. It doesn't remove other kinds of whitespace characters (tab, no-break space, etc.).
-## trimRight(s)
+**Syntax**
-Returns a string that removes the whitespace characters on right side.
+```sql
+trimLeft(string)
+```
-## trimBoth(s)
+Alias: `ltrim`.
-Returns a string that removes the whitespace characters on either side.
+**Parameters**
+
+- `string` — The string to trim. [String](../../data_types/string.md).
+
+**Returned value**
+
+A string without leading common whitespace.
+
+Type: `String`.
+
+**Example**
+
+Query:
+
+```sql
+SELECT trimLeft(' Hello, world! ')
+```
+
+Result:
+
+```text
+┌─trimLeft(' Hello, world! ')─┐
+│ Hello, world! │
+└─────────────────────────────────────┘
+```
+
+## trimRight {#trimright}
+
+Removes all consecutive occurrences of common whitespace (ASCII character 32) from the end of a string. It doesn't remove other kinds of whitespace characters (tab, no-break space, etc.).
+
+**Syntax**
+
+```sql
+trimRight(string)
+```
+
+Alias: `rtrim`.
+
+**Parameters**
+
+- `string` — The string to trim. [String](../../data_types/string.md).
+
+**Returned value**
+
+A string without trailing common whitespace.
+
+Type: `String`.
+
+**Example**
+
+Query:
+
+```sql
+SELECT trimRight(' Hello, world! ')
+```
+
+Result:
+
+```text
+┌─trimRight(' Hello, world! ')─┐
+│ Hello, world! │
+└──────────────────────────────────────┘
+```
+
+## trimBoth {#trimboth}
+
+Removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string. It doesn't remove other kinds of whitespace characters (tab, no-break space, etc.).
+
+**Syntax**
+
+```sql
+trimBoth(string)
+```
+
+Alias: `trim`.
+
+**Parameters**
+
+- `string` — The string to trim. [String](../../data_types/string.md).
+
+**Returned value**
+
+A string without leading and trailing common whitespace.
+
+Type: `String`.
+
+**Example**
+
+Query:
+
+```sql
+SELECT trimBoth(' Hello, world! ')
+```
+
+Result:
+
+```text
+┌─trimBoth(' Hello, world! ')─┐
+│ Hello, world! │
+└─────────────────────────────────────┘
+```
## CRC32(s)
diff --git a/docs/en/query_language/functions/type_conversion_functions.md b/docs/en/query_language/functions/type_conversion_functions.md
index 900bc8e0629..ec0ddbe3e9d 100644
--- a/docs/en/query_language/functions/type_conversion_functions.md
+++ b/docs/en/query_language/functions/type_conversion_functions.md
@@ -351,8 +351,30 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null
## toInterval(Year|Quarter|Month|Week|Day|Hour|Minute|Second) {#function-tointerval}
-Converts a Number type argument to a Interval type (duration).
-The interval type is actually very useful, you can use this type of data to perform arithmetic operations directly with Date or DateTime.
At the same time, ClickHouse provides a more convenient syntax for declaring Interval type data. For example: +Converts a Number type argument to an [Interval](../../data_types/special_data_types/interval.md) data type. + +**Syntax** + +```sql +toIntervalSecond(number) +toIntervalMinute(number) +toIntervalHour(number) +toIntervalDay(number) +toIntervalWeek(number) +toIntervalMonth(number) +toIntervalQuarter(number) +toIntervalYear(number) +``` + +**Parameters** + +- `number` — Duration of interval. Positive integer number. + +**Returned values** + +- The value in `Interval` data type. + +**Example** ```sql WITH diff --git a/docs/en/query_language/operators.md b/docs/en/query_language/operators.md index a12d097b8e5..571685e61d0 100644 --- a/docs/en/query_language/operators.md +++ b/docs/en/query_language/operators.md @@ -1,69 +1,69 @@ # Operators -All operators are transformed to the corresponding functions at the query parsing stage, in accordance with their precedence and associativity. +All operators are transformed to their corresponding functions at the query parsing stage in accordance with their precedence and associativity. Groups of operators are listed in order of priority (the higher it is in the list, the earlier the operator is connected to its arguments). ## Access Operators -`a[N]` Access to an element of an array; ` arrayElement(a, N) function`. +`a[N]` – Access to an element of an array. The `arrayElement(a, N)` function. -`a.N` – Access to a tuple element; `tupleElement(a, N)` function. +`a.N` – Access to a tuple element. The `tupleElement(a, N)` function. ## Numeric Negation Operator -`-a` – The `negate (a)` function. +`-a` – The `negate (a)` function. ## Multiplication and Division Operators -`a * b` – The `multiply (a, b) function.` +`a * b` – The `multiply (a, b)` function. -`a / b` – The ` divide(a, b) function.` +`a / b` – The `divide(a, b)` function. -`a % b` – The `modulo(a, b) function.` +`a % b` – The `modulo(a, b)` function. ## Addition and Subtraction Operators -`a + b` – The `plus(a, b) function.` +`a + b` – The `plus(a, b)` function. -`a - b` – The `minus(a, b) function.` +`a - b` – The `minus(a, b)` function. ## Comparison Operators -`a = b` – The `equals(a, b) function.` +`a = b` – The `equals(a, b)` function. -`a == b` – The ` equals(a, b) function.` +`a == b` – The `equals(a, b)` function. -`a != b` – The `notEquals(a, b) function.` +`a != b` – The `notEquals(a, b)` function. -`a <> b` – The `notEquals(a, b) function.` +`a <> b` – The `notEquals(a, b)` function. -`a <= b` – The `lessOrEquals(a, b) function.` +`a <= b` – The `lessOrEquals(a, b)` function. -`a >= b` – The `greaterOrEquals(a, b) function.` +`a >= b` – The `greaterOrEquals(a, b)` function. -`a < b` – The `less(a, b) function.` +`a < b` – The `less(a, b)` function. -`a > b` – The `greater(a, b) function.` +`a > b` – The `greater(a, b)` function. -`a LIKE s` – The `like(a, b) function.` +`a LIKE s` – The `like(a, b)` function. -`a NOT LIKE s` – The `notLike(a, b) function.` +`a NOT LIKE s` – The `notLike(a, b)` function. -`a BETWEEN b AND c` – The same as `a >= b AND a <= c.` +`a BETWEEN b AND c` – The same as `a >= b AND a <= c`. -`a NOT BETWEEN b AND c` – The same as `a < b OR a > c.` +`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. ## Operators for Working With Data Sets -*See the section [IN operators](select.md#select-in-operators).* +*See [IN operators](select.md#select-in-operators).* -`a IN ...` – The `in(a, b) function` +`a IN ...` – The `in(a, b)` function. 
-`a NOT IN ...` – The `notIn(a, b) function.`
+`a NOT IN ...` – The `notIn(a, b)` function.
-`a GLOBAL IN ...` – The `globalIn(a, b) function.`
+`a GLOBAL IN ...` – The `globalIn(a, b)` function.
-`a GLOBAL NOT IN ...` – The `globalNotIn(a, b) function.`
+`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` function.
## Operators for Working with Dates and Times {#operators-datetime}
@@ -144,7 +144,7 @@ Types of intervals:
- `YEAR`
!!! warning "Warning"
-    Intervals of different types can't be combined. You can't use the expressions like `INTERVAL 4 DAY 1 HOUR`. Express intervals in the units that smaller or equal the the smallest unit of the interval, for example `INTERVAL 25 HOUR`. Also you can use consequtive operations like in the example below.
+    Intervals with different types can't be combined. You can't use expressions like `INTERVAL 4 DAY 1 HOUR`. Express intervals in units that are smaller than or equal to the smallest unit of the interval, for example `INTERVAL 25 HOUR`. You can use consecutive operations as in the example below.
Example:
@@ -164,19 +164,19 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL
## Logical Negation Operator
-`NOT a` The `not(a) function.`
+`NOT a` – The `not(a)` function.
## Logical AND Operator
-`a AND b` – The`and(a, b) function.`
+`a AND b` – The `and(a, b)` function.
## Logical OR Operator
-`a OR b` – The `or(a, b) function.`
+`a OR b` – The `or(a, b)` function.
## Conditional Operator
-`a ? b : c` – The `if(a, b, c) function.`
+`a ? b : c` – The `if(a, b, c)` function.
Note:
diff --git a/docs/en/query_language/select.md b/docs/en/query_language/select.md
index 10569ee801c..cfa3a1e4dc7 100644
--- a/docs/en/query_language/select.md
+++ b/docs/en/query_language/select.md
@@ -112,8 +112,20 @@ In contrast to standard SQL, a synonym does not need to be specified after a sub
To execute a query, all the columns listed in the query are extracted from the appropriate table. Any columns not needed for the external query are thrown out of the subqueries. If a query does not list any columns (for example, `SELECT count() FROM t`), some column is extracted from the table anyway (the smallest one is preferred), in order to calculate the number of rows.
-The `FINAL` modifier can be used in the `SELECT` select query for engines from the [MergeTree](../operations/table_engines/mergetree.md) family. When you specify `FINAL`, data is selected fully "merged". Keep in mind that using `FINAL` leads to reading columns related to the primary key, in addition to the columns specified in the query. Additionally, the query will be executed in a single thread, and data will be merged during query execution. This means that when using `FINAL`, the query is processed slowly. In the most cases, avoid using `FINAL`.
-The `FINAL` modifier can be applied for all engines of MergeTree family that do data transformations in background merges (except GraphiteMergeTree).
+#### FINAL Modifier {#select-from-final}
+
+Applicable when selecting data from tables of the [MergeTree](../operations/table_engines/mergetree.md)-engine family, except `GraphiteMergeTree`. When `FINAL` is specified, ClickHouse fully merges data before returning the result and thus performs all data transformations that are supposed to happen during merges for the given table engine.
+
+Also supported for:
+- [Replicated](../operations/table_engines/replication.md) versions of `MergeTree` engines.
+- [View](../operations/table_engines/view.md), [Buffer](../operations/table_engines/buffer.md), [Distributed](../operations/table_engines/distributed.md), and [MaterializedView](../operations/table_engines/materializedview.md) engines that operate over other engines, if they were created over `MergeTree`-engine tables.
+
+Queries that use `FINAL` are executed more slowly than similar queries that don't, because:
+
+- The query is executed in a single thread, and data is merged during query execution.
+- Queries with `FINAL` read primary key columns in addition to the columns specified in the query.
+
+In most cases, avoid using `FINAL`.
### SAMPLE Clause {#select-sample-clause}
diff --git a/docs/fa/getting_started/index.md b/docs/fa/getting_started/index.md
index 778393aed91..57496c474e2 100644
--- a/docs/fa/getting_started/index.md
+++ b/docs/fa/getting_started/index.md
@@ -1,197 +1,11 @@
+# ﻥﺪﺷ ﻉﻭﺮﺷ -# شروع به کار +ﻖﯾﺮﻃ ﺯﺍ ﺪﯾﺎﺑ ﻪﻤﻫ ﺯﺍ ﻝﻭﺍ ، ﺪﯿﻨﮐ ﺱﺎﺴﺣﺍ ﺍﺭ ﻥﺁ ﺩﺮﮑﻠﻤﻋ ﺪﯿﻫﺍﻮﺧ ﯽﻣ ﻭ ﺪﯿﺘﺴﻫ ﺩﺭﺍﻭ ﻩﺯﺎﺗ[ﺐﺼﻧ ﻞﺣﺍﺮﻣ](install.md). +ﺪﯿﻨﮐ ﺏﺎﺨﺘﻧﺍ ﺍﺭ ﺮﯾﺯ ﯼﺎﻫ ﻪﻨﯾﺰﮔ ﺯﺍ ﯽﮑﯾ ﺪﯿﻧﺍﻮﺗ ﯽﻣ ﻥﺁ ﺯﺍ ﺲﭘ: -## نیازمندی های سیستم - -این یک سیستم چند سکویی (Cross-Platform) نمی باشد. این ابزار نیاز به Linux Ubuntu Precise (12.04) یا جدیدتر، با معماری x86\_64 و پشتیبانی از SSE 4.2 می باشد. برای چک کردن SSE 4.2 خروجی دستور زیر را بررسی کنید: +* [ﺪﯿﻨﮐ ﯽﻃ ﺍﺭ ﻞﺼﻔﻣ ﺵﺯﻮﻣﺁ](tutorial.md) +* [ﺪﯿﻨﮐ ﺶﯾﺎﻣﺯﺁ ﻪﻧﻮﻤﻧ ﯼﺎﻫ ﻩﺩﺍﺩ ﺎﺑ](example_datasets/ontime.md) +[ﯽﻠﺻﺍ ﻪﻟﺎﻘﻣ](https://clickhouse.yandex/docs/fa/getting_started/)
- -```bash -grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported" -``` - -
- -پیشنهاد می کنیم از Ubuntu TrustyT، Ubuntu Xenial یا Ubuntu Precise استفاده کنید. ترمینال باید از UTF-8 پشتیبانی کند. (به صورت پیش فرض در Ubuntu پشتیبانی می شود). - -## نصب - -### نصب از طریق پکیج های Debian/Ubuntu - -در فایل `/etc/apt/sources.list` (یا در یک فایل جدا `/etc/apt/sources.list.d/clickhouse.list`)، Repo زیر را اضافه کنید: - -
- -``` -deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ -``` - -
- -اگر شما میخوایید جدیدترین نسخه ی تست را استفاده کنید، 'stable' رو به 'testing' تغییر بدید. - -سپس دستورات زیر را اجرا کنید: - -
- -```bash -sudo apt-get install dirmngr # optional -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional -sudo apt-get update -sudo apt-get install clickhouse-client clickhouse-server -``` - -
- -شما همچنین می توانید از طریق لینک زیر پکیج ClickHouse را به صورت دستی دانلود و نصب کنید: . - -ClickHouse دارای تنظیمات محدودیت دسترسی می باشد. این تنظیمات در فایل 'users.xml' (کنار 'config.xml') می باشد. به صورت پیش فرض دسترسی برای کاربر 'default' از همه جا بدون نیاز به پسورد وجود دارد. 'user/default/networks' را مشاهده کنید. برای اطلاعات بیشتر قسمت "تنظیمات فایل ها" را مشاهده کنید. - - RPM ﯼﺎﻫ ﻪﺘﺴﺑ ﺯﺍ ### - -.ﺪﻨﮐ ﯽﻣ ﻪﯿﺻﻮﺗ ﺲﮐﻮﻨﯿﻟ ﺮﺑ ﯽﻨﺘﺒﻣ rpm ﺮﺑ ﯽﻨﺘﺒﻣ ﯼﺎﻫ ﻊﯾﺯﻮﺗ ﺮﯾﺎﺳ ﻭ CentOS ، RedHat ﯼﺍ - - :ﺪﯿﻨﮐ ﻪﻓﺎﺿﺍ ﺍﺭ ﯽﻤﺳﺭ ﻥﺰﺨﻣ ﺪﯾﺎﺑ ﺍﺪﺘﺑﺍ - -```bash -sudo yum install yum-utils -sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG -sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64 -``` - -.(ﺩﻮﺷ ﯽﻣ ﻪﯿﺻﻮﺗ ﺎﻤﺷ ﺶﯾﺎﻣﺯﺁ ﯼﺎﻫ ﻂﯿﺤﻣ ﯼﺍﺮﺑ ﻦﯾﺍ) ﺪﯿﻨﮐ ﻦﯾﺰﮕﯾﺎﺟ "ﺖﺴﺗ" ﺎﺑ ﺍﺭ "ﺭﺍﺪﯾﺎﭘ" - - :ﺪﯿﻨﮐ ﺐﺼﻧ ﺍﺭ ﺎﻫ ﻪﺘﺴﺑ ﻊﻗﺍﻭ ﺭﺩ ﺎﺗ ﺪﯿﻨﮐ ﺍﺮﺟﺍ ﺍﺭ ﺕﺍﺭﻮﺘﺳﺩ ﻦﯾﺍ ﺲﭙﺳ - -```bash -sudo yum install clickhouse-server clickhouse-client -``` - -. :ﺪﯿﻨﮐ ﺐﺼﻧ ﻭ ﯼﺮﯿﮔﺭﺎﺑ ﺎﺠﻨ - - Docker Image ﺯﺍ ### - -.ﺪﻨﻨﮐ ﯽﻣ ﻩﺩﺎﻔﺘﺳﺍ ﻞﺧﺍﺩ ﺭﺩ "deb" ﯽﻤﺳﺭ ﯼﺎﻫ ﻪﺘﺴﺑ ﺯﺍ ﺮﯾﻭﺎﺼﺗ ﻦﯾﺍ .ﺪﯿﻨﮐ ﻝﺎﺒﻧﺩ ﺍﺭ (/ht - - -### نصب از طریق Source - -برای Compile، دستورالعمل های فایل build.md را دنبال کنید: - -شما میتوانید پکیج را compile و نصب کنید. شما همچنین می توانید بدون نصب پکیج از برنامه ها استفاده کنید. - -
- -``` -Client: dbms/programs/clickhouse-client -Server: dbms/programs/clickhouse-server -``` - -
- -برای سرور، یک کاتالوگ با دیتا بسازید، مانند - -
- -``` -/opt/clickhouse/data/default/ -/opt/clickhouse/metadata/default/ -``` - -
- -(قابل تنظیم در تنظیمات سرور). 'chown' را برای کاربر دلخواه اجرا کنید. - -به مسیر لاگ ها در تنظیمات سرور توجه کنید (src/dbms/programs/config.xml). - -### روش های دیگر نصب - -Docker image: - -پکیج RPM برای CentOS یا RHEL: - -Gentoo: `emerge clickhouse` - -## راه اندازی - -برای استارت سرور (به صورت daemon)، دستور زیر را اجرا کنید: - -
- -```bash -sudo service clickhouse-server start -``` - -
- -لاگ های دایرکتوری `/var/log/clickhouse-server/` directory. را مشاهده کنید. - -اگر سرور استارت نشد، فایل تنظیمات را بررسی کنید `/etc/clickhouse-server/config.xml.` - -شما همچنین می توانید سرور را از طریق کنسول راه اندازی کنید: - -
- -```bash -clickhouse-server --config-file=/etc/clickhouse-server/config.xml -``` - -
- -در این مورد که مناسب زمان توسعه می باشد، لاگ ها در کنسول پرینت می شوند. اگر فایل تنظیمات در دایرکتوری جاری باشد، نیازی به مشخص کردن '--config-file' نمی باشد. به صورت پیش فرض از './config.xml' استفاده می شود. - -شما می توانید از کلاینت command-line برای اتصال به سرور استفاده کنید: - -
- -```bash -clickhouse-client -``` - -
- -پارامترهای پیش فرض، نشان از اتصال به localhost:9000 از طرف کاربر 'default' بدون پسورد را می دهد. از کلاینت میتوان برای اتصال به یک سرور remote استفاده کرد. مثال: - -
- -```bash -clickhouse-client --host=example.com -``` - -
- -برای اطلاعات بیشتر، بخش "کلاینت Command-line" را مشاهده کنید. - -چک کردن سیستم: - -
- -```bash -milovidov@hostname:~/work/metrica/src/dbms/src/Client$ ./clickhouse-client -ClickHouse client version 0.0.18749. -Connecting to localhost:9000. -Connected to ClickHouse server version 0.0.18749. - -:) SELECT 1 - -SELECT 1 - -┌─1─┐ -│ 1 │ -└───┘ - -1 rows in set. Elapsed: 0.003 sec. - -:) -``` - -
- -**تبریک میگم، سیستم کار می کنه!** - -برای ادامه آزمایشات، شما میتوانید دیتاست های تستی را دریافت و امتحان کنید. - -
-[مقاله اصلی](https://clickhouse.yandex/docs/fa/getting_started/) diff --git a/docs/fa/getting_started/install.md b/docs/fa/getting_started/install.md new file mode 100644 index 00000000000..790c9381007 --- /dev/null +++ b/docs/fa/getting_started/install.md @@ -0,0 +1,199 @@ +
+ +# ﯼﺯﺍﺪﻧﺍ ﻩﺍﺭ ﻭ ﺐﺼﻧ + +## نیازمندی های سیستم + +ClickHouse ﺲﮐﻮﻨﯿﻟ ﻉﻮﻧ ﺮﻫ ﯼﻭﺭ ﺮﺑ ﺪﻧﺍﻮﺗ ﯽﻣ ، FreeBSD ﺎﯾ Mac OS X ﯼﺭﺎﻤﻌﻣ ﺎﺑ CPU x + +:ﺖﺳﺍ ﻩﺪﻣﺁ ، ﺪﻨﮐ ﯽﻣ ﯽﻧﺎﺒﯿﺘﺸﭘ SSE 4.2 ﺯﺍ ﯽﻠﻌﻓ CPU ﺎﯾﺁ ﻪﮑﻨﯾﺍ ﯽﺳﺭﺮﺑ ﯼﺍﺮﺑ ﺭﻮﺘﺳﺩ ﻦﯾﺍ + +
+ +```bash +grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported" +``` + +
+ +ﺪﯾﺎﺑ ، ﺪﻧﺭﺍﺪﻧ PowerPC64LE ﺎﯾ AArch64 ﯼﺭﺎﻤﻌﻣ ﺎﯾ ﺪﻨﻨﮐ ﯽﻤﻧ ﯽﻧﺎﺒﯿﺘﺸﭘ SSE 4.2 ﺯﺍ ﻪﮐ[ClickHouse ﺪﯿﻨﮐ ﺩﺎﺠﯾﺍ ﻊﺑﺎﻨﻣ ﺯﺍ ﺍﺭ](#from-sources) ﺐﺳﺎﻨﻣ ﺕﺎﻤﯿﻈﻨﺗ ﺎﺑ + +##ﺩﻮﺟﻮﻣ ﺐﺼﻧ ﯼﺎﻫ ﻪﻨﯾﺰﮔ + +### نصب از طریق پکیج های Debian/Ubuntu {#from-deb-packages} + +در فایل `/etc/apt/sources.list` (یا در یک فایل جدا `/etc/apt/sources.list.d/clickhouse.list`)، Repo زیر را اضافه کنید: + +
+ +``` +deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ +``` + +
+ +اگر شما میخوایید جدیدترین نسخه ی تست را استفاده کنید، 'stable' رو به 'testing' تغییر بدید. + +سپس دستورات زیر را اجرا کنید: + +
+ +```bash +sudo apt-get install dirmngr # optional +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional +sudo apt-get update +sudo apt-get install clickhouse-client clickhouse-server +``` + +
+ +شما همچنین می توانید از طریق لینک زیر پکیج ClickHouse را به صورت دستی دانلود و نصب کنید: . + +ClickHouse دارای تنظیمات محدودیت دسترسی می باشد. این تنظیمات در فایل 'users.xml' (کنار 'config.xml') می باشد. به صورت پیش فرض دسترسی برای کاربر 'default' از همه جا بدون نیاز به پسورد وجود دارد. 'user/default/networks' را مشاهده کنید. برای اطلاعات بیشتر قسمت "تنظیمات فایل ها" را مشاهده کنید. + +### RPM ﯼﺎﻫ ﻪﺘﺴﺑ ﺯﺍ {#from-rpm-packages} + +.ﺪﻨﮐ ﯽﻣ ﻪﯿﺻﻮﺗ ﺲﮐﻮﻨﯿﻟ ﺮﺑ ﯽﻨﺘﺒﻣ rpm ﺮﺑ ﯽﻨﺘﺒﻣ ﯼﺎﻫ ﻊﯾﺯﻮﺗ ﺮﯾﺎﺳ ﻭ CentOS ، RedHat ﯼﺍ + + :ﺪﯿﻨﮐ ﻪﻓﺎﺿﺍ ﺍﺭ ﯽﻤﺳﺭ ﻥﺰﺨﻣ ﺪﯾﺎﺑ ﺍﺪﺘﺑﺍ + +```bash +sudo yum install yum-utils +sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG +sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64 +``` + +.(ﺩﻮﺷ ﯽﻣ ﻪﯿﺻﻮﺗ ﺎﻤﺷ ﺶﯾﺎﻣﺯﺁ ﯼﺎﻫ ﻂﯿﺤﻣ ﯼﺍﺮﺑ ﻦﯾﺍ) ﺪﯿﻨﮐ ﻦﯾﺰﮕﯾﺎﺟ "ﺖﺴﺗ" ﺎﺑ ﺍﺭ "ﺭﺍﺪﯾﺎﭘ" + + :ﺪﯿﻨﮐ ﺐﺼﻧ ﺍﺭ ﺎﻫ ﻪﺘﺴﺑ ﻊﻗﺍﻭ ﺭﺩ ﺎﺗ ﺪﯿﻨﮐ ﺍﺮﺟﺍ ﺍﺭ ﺕﺍﺭﻮﺘﺳﺩ ﻦﯾﺍ ﺲﭙﺳ + +```bash +sudo yum install clickhouse-server clickhouse-client +``` + +. :ﺪﯿﻨﮐ ﺐﺼﻧ ﻭ ﯼﺮﯿﮔﺭﺎﺑ ﺎﺠﻨ + + Docker Image ﺯﺍ ### + +.ﺪﻨﻨﮐ ﯽﻣ ﻩﺩﺎﻔﺘﺳﺍ ﻞﺧﺍﺩ ﺭﺩ "deb" ﯽﻤﺳﺭ ﯼﺎﻫ ﻪﺘﺴﺑ ﺯﺍ ﺮﯾﻭﺎﺼﺗ ﻦﯾﺍ .ﺪﯿﻨﮐ ﻝﺎﺒﻧﺩ ﺍﺭ (/ht + + +### نصب از طریق Source {#from-sources} + +برای Compile، دستورالعمل های فایل build.md را دنبال کنید: + +شما میتوانید پکیج را compile و نصب کنید. شما همچنین می توانید بدون نصب پکیج از برنامه ها استفاده کنید. + +
+ +``` +Client: dbms/programs/clickhouse-client +Server: dbms/programs/clickhouse-server +``` + +
+ +برای سرور، یک کاتالوگ با دیتا بسازید، مانند + +
+ +``` +/opt/clickhouse/data/default/ +/opt/clickhouse/metadata/default/ +``` + +
+ +(قابل تنظیم در تنظیمات سرور). 'chown' را برای کاربر دلخواه اجرا کنید. + +به مسیر لاگ ها در تنظیمات سرور توجه کنید (src/dbms/programs/config.xml). + +### روش های دیگر نصب {#from-docker-image} + +Docker image: + +پکیج RPM برای CentOS یا RHEL: + +Gentoo: `emerge clickhouse` + +## راه اندازی + +برای استارت سرور (به صورت daemon)، دستور زیر را اجرا کنید: + +
+ +```bash +sudo service clickhouse-server start +``` + +
+ +لاگ های دایرکتوری `/var/log/clickhouse-server/` directory. را مشاهده کنید. + +اگر سرور استارت نشد، فایل تنظیمات را بررسی کنید `/etc/clickhouse-server/config.xml.` + +شما همچنین می توانید سرور را از طریق کنسول راه اندازی کنید: + +
+ +```bash +clickhouse-server --config-file=/etc/clickhouse-server/config.xml +``` + +
+ +در این مورد که مناسب زمان توسعه می باشد، لاگ ها در کنسول پرینت می شوند. اگر فایل تنظیمات در دایرکتوری جاری باشد، نیازی به مشخص کردن '--config-file' نمی باشد. به صورت پیش فرض از './config.xml' استفاده می شود. + +شما می توانید از کلاینت command-line برای اتصال به سرور استفاده کنید: + +
+ +```bash +clickhouse-client +``` + +
+ +پارامترهای پیش فرض، نشان از اتصال به localhost:9000 از طرف کاربر 'default' بدون پسورد را می دهد. از کلاینت میتوان برای اتصال به یک سرور remote استفاده کرد. مثال: + +
+ +```bash +clickhouse-client --host=example.com +``` + +
+ +برای اطلاعات بیشتر، بخش "کلاینت Command-line" را مشاهده کنید. + +چک کردن سیستم: + +
+ +```bash +milovidov@hostname:~/work/metrica/src/dbms/src/Client$ ./clickhouse-client +ClickHouse client version 0.0.18749. +Connecting to localhost:9000. +Connected to ClickHouse server version 0.0.18749. + +:) SELECT 1 + +SELECT 1 + +┌─1─┐ +│ 1 │ +└───┘ + +1 rows in set. Elapsed: 0.003 sec. + +:) +``` + +
+ +**تبریک میگم، سیستم کار می کنه!** + +برای ادامه آزمایشات، شما میتوانید دیتاست های تستی را دریافت و امتحان کنید. + +
+[مقاله اصلی](https://clickhouse.yandex/docs/fa/getting_started/install/) diff --git a/docs/fa/getting_started/tutorial.md b/docs/fa/getting_started/tutorial.md new file mode 120000 index 00000000000..8bc40816ab2 --- /dev/null +++ b/docs/fa/getting_started/tutorial.md @@ -0,0 +1 @@ +../../en/getting_started/tutorial.md \ No newline at end of file diff --git a/docs/fa/introduction/ya_metrika_task.md b/docs/fa/introduction/history.md similarity index 99% rename from docs/fa/introduction/ya_metrika_task.md rename to docs/fa/introduction/history.md index 1ea434f248c..abde10aa6f3 100644 --- a/docs/fa/introduction/ya_metrika_task.md +++ b/docs/fa/introduction/history.md @@ -1,6 +1,6 @@
-# Yandex.Metrica use case +# ClickHouse ﻪﭽﺨﯾﺭﺎﺗ ClickHouse در ابتدا برای قدرت به Yandex.Metrica دومین بستر آنالیز وب در دنیا توسعه داده شد، و همچنان جز اصلی آن است. ClickHouse اجازه می دهند که با بیش از 13 تریلیون رکورد در دیتابیس و بیش از 20 میلیارد event در روز، گزارش های مستقیم (On the fly) از داده های non-aggregate تهیه کنیم. این مقاله پیشنیه ی تاریخی در ارتباط با اهداف اصلی ClickHouse قبل از آنکه به یک محصول open source تبدیل شود، می دهد. diff --git a/docs/fa/query_language/functions/introspection.md b/docs/fa/query_language/functions/introspection.md new file mode 120000 index 00000000000..b1a487e9c77 --- /dev/null +++ b/docs/fa/query_language/functions/introspection.md @@ -0,0 +1 @@ +../../../en/query_language/functions/introspection.md \ No newline at end of file diff --git a/docs/ja/changelog.md b/docs/ja/changelog.md new file mode 120000 index 00000000000..699cc9e7b7c --- /dev/null +++ b/docs/ja/changelog.md @@ -0,0 +1 @@ +../../CHANGELOG.md \ No newline at end of file diff --git a/docs/ja/data_types/array.md b/docs/ja/data_types/array.md new file mode 120000 index 00000000000..808c98bf91a --- /dev/null +++ b/docs/ja/data_types/array.md @@ -0,0 +1 @@ +../../en/data_types/array.md \ No newline at end of file diff --git a/docs/ja/data_types/boolean.md b/docs/ja/data_types/boolean.md new file mode 120000 index 00000000000..42e84f1e52a --- /dev/null +++ b/docs/ja/data_types/boolean.md @@ -0,0 +1 @@ +../../en/data_types/boolean.md \ No newline at end of file diff --git a/docs/ja/data_types/date.md b/docs/ja/data_types/date.md new file mode 120000 index 00000000000..d1ebc137e8f --- /dev/null +++ b/docs/ja/data_types/date.md @@ -0,0 +1 @@ +../../en/data_types/date.md \ No newline at end of file diff --git a/docs/ja/data_types/datetime.md b/docs/ja/data_types/datetime.md new file mode 120000 index 00000000000..2eb9f44e6eb --- /dev/null +++ b/docs/ja/data_types/datetime.md @@ -0,0 +1 @@ +../../en/data_types/datetime.md \ No newline at end of file diff --git a/docs/ja/data_types/decimal.md b/docs/ja/data_types/decimal.md new file mode 120000 index 00000000000..ccea440adfa --- /dev/null +++ b/docs/ja/data_types/decimal.md @@ -0,0 +1 @@ +../../en/data_types/decimal.md \ No newline at end of file diff --git a/docs/ja/data_types/domains/ipv4.md b/docs/ja/data_types/domains/ipv4.md new file mode 120000 index 00000000000..eb4cc7d57b5 --- /dev/null +++ b/docs/ja/data_types/domains/ipv4.md @@ -0,0 +1 @@ +../../../en/data_types/domains/ipv4.md \ No newline at end of file diff --git a/docs/ja/data_types/domains/ipv6.md b/docs/ja/data_types/domains/ipv6.md new file mode 120000 index 00000000000..cca37a22458 --- /dev/null +++ b/docs/ja/data_types/domains/ipv6.md @@ -0,0 +1 @@ +../../../en/data_types/domains/ipv6.md \ No newline at end of file diff --git a/docs/ja/data_types/domains/overview.md b/docs/ja/data_types/domains/overview.md new file mode 120000 index 00000000000..13465d655ee --- /dev/null +++ b/docs/ja/data_types/domains/overview.md @@ -0,0 +1 @@ +../../../en/data_types/domains/overview.md \ No newline at end of file diff --git a/docs/ja/data_types/enum.md b/docs/ja/data_types/enum.md new file mode 120000 index 00000000000..23ebe64773e --- /dev/null +++ b/docs/ja/data_types/enum.md @@ -0,0 +1 @@ +../../en/data_types/enum.md \ No newline at end of file diff --git a/docs/ja/data_types/fixedstring.md b/docs/ja/data_types/fixedstring.md new file mode 120000 index 00000000000..53092fcb884 --- /dev/null +++ b/docs/ja/data_types/fixedstring.md @@ -0,0 +1 @@ +../../en/data_types/fixedstring.md \ No 
newline at end of file diff --git a/docs/ja/data_types/float.md b/docs/ja/data_types/float.md new file mode 120000 index 00000000000..d2ae6bd11de --- /dev/null +++ b/docs/ja/data_types/float.md @@ -0,0 +1 @@ +../../en/data_types/float.md \ No newline at end of file diff --git a/docs/ja/data_types/index.md b/docs/ja/data_types/index.md new file mode 120000 index 00000000000..c9f29d637f3 --- /dev/null +++ b/docs/ja/data_types/index.md @@ -0,0 +1 @@ +../../en/data_types/index.md \ No newline at end of file diff --git a/docs/ja/data_types/int_uint.md b/docs/ja/data_types/int_uint.md new file mode 120000 index 00000000000..3a913c9328e --- /dev/null +++ b/docs/ja/data_types/int_uint.md @@ -0,0 +1 @@ +../../en/data_types/int_uint.md \ No newline at end of file diff --git a/docs/ja/data_types/nested_data_structures/aggregatefunction.md b/docs/ja/data_types/nested_data_structures/aggregatefunction.md new file mode 120000 index 00000000000..36544324d2b --- /dev/null +++ b/docs/ja/data_types/nested_data_structures/aggregatefunction.md @@ -0,0 +1 @@ +../../../en/data_types/nested_data_structures/aggregatefunction.md \ No newline at end of file diff --git a/docs/ja/data_types/nested_data_structures/index.md b/docs/ja/data_types/nested_data_structures/index.md new file mode 120000 index 00000000000..a5659a9c5cd --- /dev/null +++ b/docs/ja/data_types/nested_data_structures/index.md @@ -0,0 +1 @@ +../../../en/data_types/nested_data_structures/index.md \ No newline at end of file diff --git a/docs/ja/data_types/nested_data_structures/nested.md b/docs/ja/data_types/nested_data_structures/nested.md new file mode 120000 index 00000000000..653a1ce31c3 --- /dev/null +++ b/docs/ja/data_types/nested_data_structures/nested.md @@ -0,0 +1 @@ +../../../en/data_types/nested_data_structures/nested.md \ No newline at end of file diff --git a/docs/ja/data_types/nullable.md b/docs/ja/data_types/nullable.md new file mode 120000 index 00000000000..0233f91d954 --- /dev/null +++ b/docs/ja/data_types/nullable.md @@ -0,0 +1 @@ +../../en/data_types/nullable.md \ No newline at end of file diff --git a/docs/ja/data_types/special_data_types/expression.md b/docs/ja/data_types/special_data_types/expression.md new file mode 120000 index 00000000000..4cec632b416 --- /dev/null +++ b/docs/ja/data_types/special_data_types/expression.md @@ -0,0 +1 @@ +../../../en/data_types/special_data_types/expression.md \ No newline at end of file diff --git a/docs/ja/data_types/special_data_types/index.md b/docs/ja/data_types/special_data_types/index.md new file mode 120000 index 00000000000..f3ca4a47f98 --- /dev/null +++ b/docs/ja/data_types/special_data_types/index.md @@ -0,0 +1 @@ +../../../en/data_types/special_data_types/index.md \ No newline at end of file diff --git a/docs/ja/data_types/special_data_types/interval.md b/docs/ja/data_types/special_data_types/interval.md new file mode 120000 index 00000000000..6829f5ced00 --- /dev/null +++ b/docs/ja/data_types/special_data_types/interval.md @@ -0,0 +1 @@ +../../../en/data_types/special_data_types/interval.md \ No newline at end of file diff --git a/docs/ja/data_types/special_data_types/nothing.md b/docs/ja/data_types/special_data_types/nothing.md new file mode 120000 index 00000000000..197a752ce9c --- /dev/null +++ b/docs/ja/data_types/special_data_types/nothing.md @@ -0,0 +1 @@ +../../../en/data_types/special_data_types/nothing.md \ No newline at end of file diff --git a/docs/ja/data_types/special_data_types/set.md b/docs/ja/data_types/special_data_types/set.md new file mode 120000 index 
00000000000..5beb14114d3 --- /dev/null +++ b/docs/ja/data_types/special_data_types/set.md @@ -0,0 +1 @@ +../../../en/data_types/special_data_types/set.md \ No newline at end of file diff --git a/docs/ja/data_types/string.md b/docs/ja/data_types/string.md new file mode 120000 index 00000000000..7bdd739398f --- /dev/null +++ b/docs/ja/data_types/string.md @@ -0,0 +1 @@ +../../en/data_types/string.md \ No newline at end of file diff --git a/docs/ja/data_types/tuple.md b/docs/ja/data_types/tuple.md new file mode 120000 index 00000000000..d30a8463aeb --- /dev/null +++ b/docs/ja/data_types/tuple.md @@ -0,0 +1 @@ +../../en/data_types/tuple.md \ No newline at end of file diff --git a/docs/ja/data_types/uuid.md b/docs/ja/data_types/uuid.md new file mode 120000 index 00000000000..aba05e889ac --- /dev/null +++ b/docs/ja/data_types/uuid.md @@ -0,0 +1 @@ +../../en/data_types/uuid.md \ No newline at end of file diff --git a/docs/ja/database_engines/index.md b/docs/ja/database_engines/index.md new file mode 120000 index 00000000000..bbdb762a4ad --- /dev/null +++ b/docs/ja/database_engines/index.md @@ -0,0 +1 @@ +../../en/database_engines/index.md \ No newline at end of file diff --git a/docs/ja/database_engines/lazy.md b/docs/ja/database_engines/lazy.md new file mode 120000 index 00000000000..66830dcdb2f --- /dev/null +++ b/docs/ja/database_engines/lazy.md @@ -0,0 +1 @@ +../../en/database_engines/lazy.md \ No newline at end of file diff --git a/docs/ja/database_engines/mysql.md b/docs/ja/database_engines/mysql.md new file mode 120000 index 00000000000..51ac4126e2d --- /dev/null +++ b/docs/ja/database_engines/mysql.md @@ -0,0 +1 @@ +../../en/database_engines/mysql.md \ No newline at end of file diff --git a/docs/ja/development/architecture.md b/docs/ja/development/architecture.md new file mode 120000 index 00000000000..abda4dd48a8 --- /dev/null +++ b/docs/ja/development/architecture.md @@ -0,0 +1 @@ +../../en/development/architecture.md \ No newline at end of file diff --git a/docs/ja/development/build.md b/docs/ja/development/build.md new file mode 120000 index 00000000000..480dbc2e9f5 --- /dev/null +++ b/docs/ja/development/build.md @@ -0,0 +1 @@ +../../en/development/build.md \ No newline at end of file diff --git a/docs/ja/development/build_cross_arm.md b/docs/ja/development/build_cross_arm.md new file mode 120000 index 00000000000..983a9872dc1 --- /dev/null +++ b/docs/ja/development/build_cross_arm.md @@ -0,0 +1 @@ +../../en/development/build_cross_arm.md \ No newline at end of file diff --git a/docs/ja/development/build_cross_osx.md b/docs/ja/development/build_cross_osx.md new file mode 120000 index 00000000000..72e64e8631f --- /dev/null +++ b/docs/ja/development/build_cross_osx.md @@ -0,0 +1 @@ +../../en/development/build_cross_osx.md \ No newline at end of file diff --git a/docs/ja/development/build_osx.md b/docs/ja/development/build_osx.md new file mode 120000 index 00000000000..f9adaf24584 --- /dev/null +++ b/docs/ja/development/build_osx.md @@ -0,0 +1 @@ +../../en/development/build_osx.md \ No newline at end of file diff --git a/docs/ja/development/contrib.md b/docs/ja/development/contrib.md new file mode 120000 index 00000000000..4749f95f9ef --- /dev/null +++ b/docs/ja/development/contrib.md @@ -0,0 +1 @@ +../../en/development/contrib.md \ No newline at end of file diff --git a/docs/ja/development/developer_instruction.md b/docs/ja/development/developer_instruction.md new file mode 120000 index 00000000000..bdfa9047aa2 --- /dev/null +++ b/docs/ja/development/developer_instruction.md @@ -0,0 +1 
@@ +../../en/development/developer_instruction.md \ No newline at end of file diff --git a/docs/ja/development/index.md b/docs/ja/development/index.md new file mode 120000 index 00000000000..1e2ad97dcc5 --- /dev/null +++ b/docs/ja/development/index.md @@ -0,0 +1 @@ +../../en/development/index.md \ No newline at end of file diff --git a/docs/ja/development/style.md b/docs/ja/development/style.md new file mode 120000 index 00000000000..c1bbf11f421 --- /dev/null +++ b/docs/ja/development/style.md @@ -0,0 +1 @@ +../../en/development/style.md \ No newline at end of file diff --git a/docs/ja/development/tests.md b/docs/ja/development/tests.md new file mode 120000 index 00000000000..c03d36c3916 --- /dev/null +++ b/docs/ja/development/tests.md @@ -0,0 +1 @@ +../../en/development/tests.md \ No newline at end of file diff --git a/docs/ja/faq/general.md b/docs/ja/faq/general.md new file mode 120000 index 00000000000..bc267395b1b --- /dev/null +++ b/docs/ja/faq/general.md @@ -0,0 +1 @@ +../../en/faq/general.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/amplab_benchmark.md b/docs/ja/getting_started/example_datasets/amplab_benchmark.md new file mode 120000 index 00000000000..78c93906bb0 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/amplab_benchmark.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/amplab_benchmark.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/criteo.md b/docs/ja/getting_started/example_datasets/criteo.md new file mode 120000 index 00000000000..507dc68cd62 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/criteo.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/criteo.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/metrica.md b/docs/ja/getting_started/example_datasets/metrica.md new file mode 120000 index 00000000000..984023973eb --- /dev/null +++ b/docs/ja/getting_started/example_datasets/metrica.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/metrica.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/nyc_taxi.md b/docs/ja/getting_started/example_datasets/nyc_taxi.md new file mode 120000 index 00000000000..c47fc83a293 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/nyc_taxi.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/nyc_taxi.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/ontime.md b/docs/ja/getting_started/example_datasets/ontime.md new file mode 120000 index 00000000000..87cfbb8be91 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/ontime.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/ontime.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/star_schema.md b/docs/ja/getting_started/example_datasets/star_schema.md new file mode 120000 index 00000000000..1c26392dd23 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/star_schema.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/star_schema.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/wikistat.md b/docs/ja/getting_started/example_datasets/wikistat.md new file mode 120000 index 00000000000..bf6e780fb27 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/wikistat.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/wikistat.md \ No newline at end of file diff --git a/docs/ja/getting_started/index.md 
b/docs/ja/getting_started/index.md new file mode 120000 index 00000000000..1acedb0f03e --- /dev/null +++ b/docs/ja/getting_started/index.md @@ -0,0 +1 @@ +../../en/getting_started/index.md \ No newline at end of file diff --git a/docs/ja/getting_started/install.md b/docs/ja/getting_started/install.md new file mode 120000 index 00000000000..60aa3fb93a4 --- /dev/null +++ b/docs/ja/getting_started/install.md @@ -0,0 +1 @@ +../../en/getting_started/install.md \ No newline at end of file diff --git a/docs/ja/getting_started/tutorial.md b/docs/ja/getting_started/tutorial.md new file mode 120000 index 00000000000..8bc40816ab2 --- /dev/null +++ b/docs/ja/getting_started/tutorial.md @@ -0,0 +1 @@ +../../en/getting_started/tutorial.md \ No newline at end of file diff --git a/docs/ja/guides/apply_catboost_model.md b/docs/ja/guides/apply_catboost_model.md new file mode 120000 index 00000000000..dd36e885974 --- /dev/null +++ b/docs/ja/guides/apply_catboost_model.md @@ -0,0 +1 @@ +../../en/guides/apply_catboost_model.md \ No newline at end of file diff --git a/docs/ja/guides/index.md b/docs/ja/guides/index.md new file mode 120000 index 00000000000..162dcbc3b8f --- /dev/null +++ b/docs/ja/guides/index.md @@ -0,0 +1 @@ +../../en/guides/index.md \ No newline at end of file diff --git a/docs/ja/images/column_oriented.gif b/docs/ja/images/column_oriented.gif new file mode 100644 index 00000000000..15f4b12e697 Binary files /dev/null and b/docs/ja/images/column_oriented.gif differ diff --git a/docs/ja/images/logo.svg b/docs/ja/images/logo.svg new file mode 100644 index 00000000000..70662da887e --- /dev/null +++ b/docs/ja/images/logo.svg @@ -0,0 +1,12 @@ +[12 lines of SVG markup, stripped in this extract] diff --git a/docs/ja/images/row_oriented.gif b/docs/ja/images/row_oriented.gif new file mode 100644 index 00000000000..53daa20f322 Binary files /dev/null and b/docs/ja/images/row_oriented.gif differ diff --git a/docs/ja/index.md b/docs/ja/index.md index 6dea5f6570b..a7f8681a2bf 100644 --- a/docs/ja/index.md +++ b/docs/ja/index.md @@ -139,4 +139,4 @@ LIMIT 20 For CPU efficiency, the query language must be declarative (SQL or MDX), or at least vector-based (J, K). The query should contain only implicit loops, allowing for optimization. -[Original article](https://clickhouse.yandex/docs/en/) +[Original article](https://clickhouse.yandex/docs/ja/) diff --git a/docs/ja/interfaces/cli.md b/docs/ja/interfaces/cli.md new file mode 120000 index 00000000000..04588066828 --- /dev/null +++ b/docs/ja/interfaces/cli.md @@ -0,0 +1 @@ +../../en/interfaces/cli.md \ No newline at end of file diff --git a/docs/ja/interfaces/cpp.md b/docs/ja/interfaces/cpp.md new file mode 120000 index 00000000000..581e50e774d --- /dev/null +++ b/docs/ja/interfaces/cpp.md @@ -0,0 +1 @@ +../../en/interfaces/cpp.md \ No newline at end of file diff --git a/docs/ja/interfaces/formats.md b/docs/ja/interfaces/formats.md new file mode 120000 index 00000000000..41a65ebe579 --- /dev/null +++ b/docs/ja/interfaces/formats.md @@ -0,0 +1 @@ +../../en/interfaces/formats.md \ No newline at end of file diff --git a/docs/ja/interfaces/http.md b/docs/ja/interfaces/http.md new file mode 120000 index 00000000000..fb293841d8b --- /dev/null +++ b/docs/ja/interfaces/http.md @@ -0,0 +1 @@ +../../en/interfaces/http.md \ No newline at end of file diff --git a/docs/ja/interfaces/index.md b/docs/ja/interfaces/index.md new file mode 120000 index 00000000000..61537763cac --- /dev/null +++ b/docs/ja/interfaces/index.md @@ -0,0 +1 @@ +../../en/interfaces/index.md \ No newline at end of file diff --git a/docs/ja/interfaces/jdbc.md b/docs/ja/interfaces/jdbc.md new file mode 120000 index
00000000000..27dfe0cfa5a --- /dev/null +++ b/docs/ja/interfaces/jdbc.md @@ -0,0 +1 @@ +../../en/interfaces/jdbc.md \ No newline at end of file diff --git a/docs/ja/interfaces/odbc.md b/docs/ja/interfaces/odbc.md new file mode 120000 index 00000000000..5ff7610e061 --- /dev/null +++ b/docs/ja/interfaces/odbc.md @@ -0,0 +1 @@ +../../en/interfaces/odbc.md \ No newline at end of file diff --git a/docs/ja/interfaces/tcp.md b/docs/ja/interfaces/tcp.md new file mode 120000 index 00000000000..a0529a856e4 --- /dev/null +++ b/docs/ja/interfaces/tcp.md @@ -0,0 +1 @@ +../../en/interfaces/tcp.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/client_libraries.md b/docs/ja/interfaces/third-party/client_libraries.md new file mode 120000 index 00000000000..5320bbe1e16 --- /dev/null +++ b/docs/ja/interfaces/third-party/client_libraries.md @@ -0,0 +1 @@ +../../../en/interfaces/third-party/client_libraries.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/gui.md b/docs/ja/interfaces/third-party/gui.md new file mode 120000 index 00000000000..ef7bc904197 --- /dev/null +++ b/docs/ja/interfaces/third-party/gui.md @@ -0,0 +1 @@ +../../../en/interfaces/third-party/gui.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/integrations.md b/docs/ja/interfaces/third-party/integrations.md new file mode 120000 index 00000000000..9cd0a21e676 --- /dev/null +++ b/docs/ja/interfaces/third-party/integrations.md @@ -0,0 +1 @@ +../../../en/interfaces/third-party/integrations.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/proxy.md b/docs/ja/interfaces/third-party/proxy.md new file mode 120000 index 00000000000..877f1b51dab --- /dev/null +++ b/docs/ja/interfaces/third-party/proxy.md @@ -0,0 +1 @@ +../../../en/interfaces/third-party/proxy.md \ No newline at end of file diff --git a/docs/ja/introduction/distinctive_features.md b/docs/ja/introduction/distinctive_features.md new file mode 120000 index 00000000000..9cf00a2a00f --- /dev/null +++ b/docs/ja/introduction/distinctive_features.md @@ -0,0 +1 @@ +../../en/introduction/distinctive_features.md \ No newline at end of file diff --git a/docs/ja/introduction/features_considered_disadvantages.md b/docs/ja/introduction/features_considered_disadvantages.md new file mode 120000 index 00000000000..45d3cdf563a --- /dev/null +++ b/docs/ja/introduction/features_considered_disadvantages.md @@ -0,0 +1 @@ +../../en/introduction/features_considered_disadvantages.md \ No newline at end of file diff --git a/docs/ja/introduction/history.md b/docs/ja/introduction/history.md new file mode 120000 index 00000000000..7004e990a59 --- /dev/null +++ b/docs/ja/introduction/history.md @@ -0,0 +1 @@ +../../en/introduction/history.md \ No newline at end of file diff --git a/docs/ja/introduction/performance.md b/docs/ja/introduction/performance.md new file mode 120000 index 00000000000..cb2912bcb81 --- /dev/null +++ b/docs/ja/introduction/performance.md @@ -0,0 +1 @@ +../../en/introduction/performance.md \ No newline at end of file diff --git a/docs/ja/operations/access_rights.md b/docs/ja/operations/access_rights.md new file mode 120000 index 00000000000..73463029569 --- /dev/null +++ b/docs/ja/operations/access_rights.md @@ -0,0 +1 @@ +../../en/operations/access_rights.md \ No newline at end of file diff --git a/docs/ja/operations/backup.md b/docs/ja/operations/backup.md new file mode 120000 index 00000000000..1003fb30e61 --- /dev/null +++ b/docs/ja/operations/backup.md @@ -0,0 +1 @@ 
+../../en/operations/backup.md \ No newline at end of file diff --git a/docs/ja/operations/configuration_files.md b/docs/ja/operations/configuration_files.md new file mode 120000 index 00000000000..a2d73dbaa25 --- /dev/null +++ b/docs/ja/operations/configuration_files.md @@ -0,0 +1 @@ +../../en/operations/configuration_files.md \ No newline at end of file diff --git a/docs/ja/operations/index.md b/docs/ja/operations/index.md new file mode 120000 index 00000000000..ce854687b86 --- /dev/null +++ b/docs/ja/operations/index.md @@ -0,0 +1 @@ +../../en/operations/index.md \ No newline at end of file diff --git a/docs/ja/operations/monitoring.md b/docs/ja/operations/monitoring.md new file mode 120000 index 00000000000..515ae8b4fff --- /dev/null +++ b/docs/ja/operations/monitoring.md @@ -0,0 +1 @@ +../../en/operations/monitoring.md \ No newline at end of file diff --git a/docs/ja/operations/quotas.md b/docs/ja/operations/quotas.md new file mode 120000 index 00000000000..1c52cdf1e91 --- /dev/null +++ b/docs/ja/operations/quotas.md @@ -0,0 +1 @@ +../../en/operations/quotas.md \ No newline at end of file diff --git a/docs/ja/operations/requirements.md b/docs/ja/operations/requirements.md new file mode 120000 index 00000000000..a71283af25c --- /dev/null +++ b/docs/ja/operations/requirements.md @@ -0,0 +1 @@ +../../en/operations/requirements.md \ No newline at end of file diff --git a/docs/ja/operations/server_settings/index.md b/docs/ja/operations/server_settings/index.md new file mode 120000 index 00000000000..1d1a0585a42 --- /dev/null +++ b/docs/ja/operations/server_settings/index.md @@ -0,0 +1 @@ +../../../en/operations/server_settings/index.md \ No newline at end of file diff --git a/docs/ja/operations/server_settings/settings.md b/docs/ja/operations/server_settings/settings.md new file mode 120000 index 00000000000..19cd2e82ce7 --- /dev/null +++ b/docs/ja/operations/server_settings/settings.md @@ -0,0 +1 @@ +../../../en/operations/server_settings/settings.md \ No newline at end of file diff --git a/docs/ja/operations/settings/constraints_on_settings.md b/docs/ja/operations/settings/constraints_on_settings.md new file mode 120000 index 00000000000..4dacf908662 --- /dev/null +++ b/docs/ja/operations/settings/constraints_on_settings.md @@ -0,0 +1 @@ +../../../en/operations/settings/constraints_on_settings.md \ No newline at end of file diff --git a/docs/ja/operations/settings/index.md b/docs/ja/operations/settings/index.md new file mode 120000 index 00000000000..fc3968d1f1e --- /dev/null +++ b/docs/ja/operations/settings/index.md @@ -0,0 +1 @@ +../../../en/operations/settings/index.md \ No newline at end of file diff --git a/docs/ja/operations/settings/permissions_for_queries.md b/docs/ja/operations/settings/permissions_for_queries.md new file mode 120000 index 00000000000..ce8473bf01c --- /dev/null +++ b/docs/ja/operations/settings/permissions_for_queries.md @@ -0,0 +1 @@ +../../../en/operations/settings/permissions_for_queries.md \ No newline at end of file diff --git a/docs/ja/operations/settings/query_complexity.md b/docs/ja/operations/settings/query_complexity.md new file mode 120000 index 00000000000..9a9c6d975a9 --- /dev/null +++ b/docs/ja/operations/settings/query_complexity.md @@ -0,0 +1 @@ +../../../en/operations/settings/query_complexity.md \ No newline at end of file diff --git a/docs/ja/operations/settings/settings.md b/docs/ja/operations/settings/settings.md new file mode 120000 index 00000000000..0c8df3cfc90 --- /dev/null +++ b/docs/ja/operations/settings/settings.md @@ -0,0 +1 @@ 
+../../../en/operations/settings/settings.md \ No newline at end of file diff --git a/docs/ja/operations/settings/settings_profiles.md b/docs/ja/operations/settings/settings_profiles.md new file mode 120000 index 00000000000..35d9747ad56 --- /dev/null +++ b/docs/ja/operations/settings/settings_profiles.md @@ -0,0 +1 @@ +../../../en/operations/settings/settings_profiles.md \ No newline at end of file diff --git a/docs/ja/operations/settings/settings_users.md b/docs/ja/operations/settings/settings_users.md new file mode 120000 index 00000000000..3a6a7cf6948 --- /dev/null +++ b/docs/ja/operations/settings/settings_users.md @@ -0,0 +1 @@ +../../../en/operations/settings/settings_users.md \ No newline at end of file diff --git a/docs/ja/operations/system_tables.md b/docs/ja/operations/system_tables.md new file mode 120000 index 00000000000..c5701190dca --- /dev/null +++ b/docs/ja/operations/system_tables.md @@ -0,0 +1 @@ +../../en/operations/system_tables.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/aggregatingmergetree.md b/docs/ja/operations/table_engines/aggregatingmergetree.md new file mode 120000 index 00000000000..907a073e0c8 --- /dev/null +++ b/docs/ja/operations/table_engines/aggregatingmergetree.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/aggregatingmergetree.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/buffer.md b/docs/ja/operations/table_engines/buffer.md new file mode 120000 index 00000000000..0a3c372fa67 --- /dev/null +++ b/docs/ja/operations/table_engines/buffer.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/buffer.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/collapsingmergetree.md b/docs/ja/operations/table_engines/collapsingmergetree.md new file mode 120000 index 00000000000..ef5cebb48d8 --- /dev/null +++ b/docs/ja/operations/table_engines/collapsingmergetree.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/collapsingmergetree.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/custom_partitioning_key.md b/docs/ja/operations/table_engines/custom_partitioning_key.md new file mode 120000 index 00000000000..a9d18cacb25 --- /dev/null +++ b/docs/ja/operations/table_engines/custom_partitioning_key.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/custom_partitioning_key.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/dictionary.md b/docs/ja/operations/table_engines/dictionary.md new file mode 120000 index 00000000000..2a95f4a669b --- /dev/null +++ b/docs/ja/operations/table_engines/dictionary.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/dictionary.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/distributed.md b/docs/ja/operations/table_engines/distributed.md new file mode 120000 index 00000000000..46994303c35 --- /dev/null +++ b/docs/ja/operations/table_engines/distributed.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/distributed.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/external_data.md b/docs/ja/operations/table_engines/external_data.md new file mode 120000 index 00000000000..27a7b6acec2 --- /dev/null +++ b/docs/ja/operations/table_engines/external_data.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/external_data.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/file.md b/docs/ja/operations/table_engines/file.md new file mode 120000 index 00000000000..27dffc8d78f --- /dev/null +++ 
b/docs/ja/operations/table_engines/file.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/file.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/graphitemergetree.md b/docs/ja/operations/table_engines/graphitemergetree.md new file mode 120000 index 00000000000..654425d050a --- /dev/null +++ b/docs/ja/operations/table_engines/graphitemergetree.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/graphitemergetree.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/hdfs.md b/docs/ja/operations/table_engines/hdfs.md new file mode 120000 index 00000000000..d4dbfa46e68 --- /dev/null +++ b/docs/ja/operations/table_engines/hdfs.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/hdfs.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/index.md b/docs/ja/operations/table_engines/index.md new file mode 120000 index 00000000000..994dff9b516 --- /dev/null +++ b/docs/ja/operations/table_engines/index.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/index.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/jdbc.md b/docs/ja/operations/table_engines/jdbc.md new file mode 120000 index 00000000000..5165d704b9a --- /dev/null +++ b/docs/ja/operations/table_engines/jdbc.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/jdbc.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/join.md b/docs/ja/operations/table_engines/join.md new file mode 120000 index 00000000000..0914ab950ed --- /dev/null +++ b/docs/ja/operations/table_engines/join.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/join.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/kafka.md b/docs/ja/operations/table_engines/kafka.md new file mode 120000 index 00000000000..cb7bd5dd0f8 --- /dev/null +++ b/docs/ja/operations/table_engines/kafka.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/kafka.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/log.md b/docs/ja/operations/table_engines/log.md new file mode 120000 index 00000000000..2c39ba68522 --- /dev/null +++ b/docs/ja/operations/table_engines/log.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/log.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/log_family.md b/docs/ja/operations/table_engines/log_family.md new file mode 120000 index 00000000000..8c5b5f0365b --- /dev/null +++ b/docs/ja/operations/table_engines/log_family.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/log_family.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/materializedview.md b/docs/ja/operations/table_engines/materializedview.md new file mode 120000 index 00000000000..e3b5deb73dc --- /dev/null +++ b/docs/ja/operations/table_engines/materializedview.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/materializedview.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/memory.md b/docs/ja/operations/table_engines/memory.md new file mode 120000 index 00000000000..eee940c7bd3 --- /dev/null +++ b/docs/ja/operations/table_engines/memory.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/memory.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/merge.md b/docs/ja/operations/table_engines/merge.md new file mode 120000 index 00000000000..9e17d9bb939 --- /dev/null +++ b/docs/ja/operations/table_engines/merge.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/merge.md \ No newline at end of file diff 
--git a/docs/ja/operations/table_engines/mergetree.md b/docs/ja/operations/table_engines/mergetree.md new file mode 120000 index 00000000000..cc6ac1e5297 --- /dev/null +++ b/docs/ja/operations/table_engines/mergetree.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/mergetree.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/mysql.md b/docs/ja/operations/table_engines/mysql.md new file mode 120000 index 00000000000..e4c268658cf --- /dev/null +++ b/docs/ja/operations/table_engines/mysql.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/mysql.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/null.md b/docs/ja/operations/table_engines/null.md new file mode 120000 index 00000000000..c7d9264571e --- /dev/null +++ b/docs/ja/operations/table_engines/null.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/null.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/odbc.md b/docs/ja/operations/table_engines/odbc.md new file mode 120000 index 00000000000..06091fd5377 --- /dev/null +++ b/docs/ja/operations/table_engines/odbc.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/odbc.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/replacingmergetree.md b/docs/ja/operations/table_engines/replacingmergetree.md new file mode 120000 index 00000000000..63ff25a4dd6 --- /dev/null +++ b/docs/ja/operations/table_engines/replacingmergetree.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/replacingmergetree.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/replication.md b/docs/ja/operations/table_engines/replication.md new file mode 120000 index 00000000000..b4b22ac708b --- /dev/null +++ b/docs/ja/operations/table_engines/replication.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/replication.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/set.md b/docs/ja/operations/table_engines/set.md new file mode 120000 index 00000000000..d37e659badd --- /dev/null +++ b/docs/ja/operations/table_engines/set.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/set.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/stripelog.md b/docs/ja/operations/table_engines/stripelog.md new file mode 120000 index 00000000000..f6521a41e3e --- /dev/null +++ b/docs/ja/operations/table_engines/stripelog.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/stripelog.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/summingmergetree.md b/docs/ja/operations/table_engines/summingmergetree.md new file mode 120000 index 00000000000..2b67e953d8a --- /dev/null +++ b/docs/ja/operations/table_engines/summingmergetree.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/summingmergetree.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/tinylog.md b/docs/ja/operations/table_engines/tinylog.md new file mode 120000 index 00000000000..bda90c7d5ce --- /dev/null +++ b/docs/ja/operations/table_engines/tinylog.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/tinylog.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/url.md b/docs/ja/operations/table_engines/url.md new file mode 120000 index 00000000000..d0de71dcf40 --- /dev/null +++ b/docs/ja/operations/table_engines/url.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/url.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/versionedcollapsingmergetree.md 
b/docs/ja/operations/table_engines/versionedcollapsingmergetree.md new file mode 120000 index 00000000000..5843fba70b8 --- /dev/null +++ b/docs/ja/operations/table_engines/versionedcollapsingmergetree.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/versionedcollapsingmergetree.md \ No newline at end of file diff --git a/docs/ja/operations/table_engines/view.md b/docs/ja/operations/table_engines/view.md new file mode 120000 index 00000000000..3f2164181a7 --- /dev/null +++ b/docs/ja/operations/table_engines/view.md @@ -0,0 +1 @@ +../../../en/operations/table_engines/view.md \ No newline at end of file diff --git a/docs/ja/operations/tips.md b/docs/ja/operations/tips.md new file mode 120000 index 00000000000..9b3413bdbc3 --- /dev/null +++ b/docs/ja/operations/tips.md @@ -0,0 +1 @@ +../../en/operations/tips.md \ No newline at end of file diff --git a/docs/ja/operations/troubleshooting.md b/docs/ja/operations/troubleshooting.md new file mode 120000 index 00000000000..84f0ff34f41 --- /dev/null +++ b/docs/ja/operations/troubleshooting.md @@ -0,0 +1 @@ +../../en/operations/troubleshooting.md \ No newline at end of file diff --git a/docs/ja/operations/update.md b/docs/ja/operations/update.md new file mode 120000 index 00000000000..88a092c0dff --- /dev/null +++ b/docs/ja/operations/update.md @@ -0,0 +1 @@ +../../en/operations/update.md \ No newline at end of file diff --git a/docs/ja/operations/utils/clickhouse-copier.md b/docs/ja/operations/utils/clickhouse-copier.md new file mode 120000 index 00000000000..c9e89e33c7b --- /dev/null +++ b/docs/ja/operations/utils/clickhouse-copier.md @@ -0,0 +1 @@ +../../../en/operations/utils/clickhouse-copier.md \ No newline at end of file diff --git a/docs/ja/operations/utils/clickhouse-local.md b/docs/ja/operations/utils/clickhouse-local.md new file mode 120000 index 00000000000..032aaaa2b84 --- /dev/null +++ b/docs/ja/operations/utils/clickhouse-local.md @@ -0,0 +1 @@ +../../../en/operations/utils/clickhouse-local.md \ No newline at end of file diff --git a/docs/ja/operations/utils/index.md b/docs/ja/operations/utils/index.md new file mode 120000 index 00000000000..dd089d1ef4b --- /dev/null +++ b/docs/ja/operations/utils/index.md @@ -0,0 +1 @@ +../../../en/operations/utils/index.md \ No newline at end of file diff --git a/docs/ja/query_language/agg_functions/combinators.md b/docs/ja/query_language/agg_functions/combinators.md new file mode 120000 index 00000000000..2b914cebd15 --- /dev/null +++ b/docs/ja/query_language/agg_functions/combinators.md @@ -0,0 +1 @@ +../../../en/query_language/agg_functions/combinators.md \ No newline at end of file diff --git a/docs/ja/query_language/agg_functions/index.md b/docs/ja/query_language/agg_functions/index.md new file mode 120000 index 00000000000..2fcf67abdeb --- /dev/null +++ b/docs/ja/query_language/agg_functions/index.md @@ -0,0 +1 @@ +../../../en/query_language/agg_functions/index.md \ No newline at end of file diff --git a/docs/ja/query_language/agg_functions/parametric_functions.md b/docs/ja/query_language/agg_functions/parametric_functions.md new file mode 120000 index 00000000000..fd3ffafcc5b --- /dev/null +++ b/docs/ja/query_language/agg_functions/parametric_functions.md @@ -0,0 +1 @@ +../../../en/query_language/agg_functions/parametric_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/agg_functions/reference.md b/docs/ja/query_language/agg_functions/reference.md new file mode 120000 index 00000000000..c5651cb0793 --- /dev/null +++ 
b/docs/ja/query_language/agg_functions/reference.md @@ -0,0 +1 @@ +../../../en/query_language/agg_functions/reference.md \ No newline at end of file diff --git a/docs/ja/query_language/alter.md b/docs/ja/query_language/alter.md new file mode 120000 index 00000000000..44f4ecf9737 --- /dev/null +++ b/docs/ja/query_language/alter.md @@ -0,0 +1 @@ +../../en/query_language/alter.md \ No newline at end of file diff --git a/docs/ja/query_language/create.md b/docs/ja/query_language/create.md new file mode 120000 index 00000000000..a13304d176e --- /dev/null +++ b/docs/ja/query_language/create.md @@ -0,0 +1 @@ +../../en/query_language/create.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/external_dicts.md b/docs/ja/query_language/dicts/external_dicts.md new file mode 120000 index 00000000000..491b94bffe6 --- /dev/null +++ b/docs/ja/query_language/dicts/external_dicts.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/external_dicts.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/external_dicts_dict.md b/docs/ja/query_language/dicts/external_dicts_dict.md new file mode 120000 index 00000000000..e27820fee60 --- /dev/null +++ b/docs/ja/query_language/dicts/external_dicts_dict.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/external_dicts_dict.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/external_dicts_dict_layout.md b/docs/ja/query_language/dicts/external_dicts_dict_layout.md new file mode 120000 index 00000000000..e391c5be723 --- /dev/null +++ b/docs/ja/query_language/dicts/external_dicts_dict_layout.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/external_dicts_dict_layout.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/external_dicts_dict_lifetime.md b/docs/ja/query_language/dicts/external_dicts_dict_lifetime.md new file mode 120000 index 00000000000..03b53c09077 --- /dev/null +++ b/docs/ja/query_language/dicts/external_dicts_dict_lifetime.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/external_dicts_dict_lifetime.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/external_dicts_dict_sources.md b/docs/ja/query_language/dicts/external_dicts_dict_sources.md new file mode 120000 index 00000000000..d4f4bf8ef3e --- /dev/null +++ b/docs/ja/query_language/dicts/external_dicts_dict_sources.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/external_dicts_dict_sources.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/external_dicts_dict_structure.md b/docs/ja/query_language/dicts/external_dicts_dict_structure.md new file mode 120000 index 00000000000..69ff759caea --- /dev/null +++ b/docs/ja/query_language/dicts/external_dicts_dict_structure.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/external_dicts_dict_structure.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/index.md b/docs/ja/query_language/dicts/index.md new file mode 120000 index 00000000000..fdc188ca2a2 --- /dev/null +++ b/docs/ja/query_language/dicts/index.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/index.md \ No newline at end of file diff --git a/docs/ja/query_language/dicts/internal_dicts.md b/docs/ja/query_language/dicts/internal_dicts.md new file mode 120000 index 00000000000..3f9408dcd45 --- /dev/null +++ b/docs/ja/query_language/dicts/internal_dicts.md @@ -0,0 +1 @@ +../../../en/query_language/dicts/internal_dicts.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/arithmetic_functions.md 
b/docs/ja/query_language/functions/arithmetic_functions.md new file mode 120000 index 00000000000..c22acb8c7f5 --- /dev/null +++ b/docs/ja/query_language/functions/arithmetic_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/arithmetic_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/array_functions.md b/docs/ja/query_language/functions/array_functions.md new file mode 120000 index 00000000000..268b2295a97 --- /dev/null +++ b/docs/ja/query_language/functions/array_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/array_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/array_join.md b/docs/ja/query_language/functions/array_join.md new file mode 120000 index 00000000000..b100dac784d --- /dev/null +++ b/docs/ja/query_language/functions/array_join.md @@ -0,0 +1 @@ +../../../en/query_language/functions/array_join.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/bit_functions.md b/docs/ja/query_language/functions/bit_functions.md new file mode 120000 index 00000000000..b5cccd0c56c --- /dev/null +++ b/docs/ja/query_language/functions/bit_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/bit_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/bitmap_functions.md b/docs/ja/query_language/functions/bitmap_functions.md new file mode 120000 index 00000000000..0a31d3d71d8 --- /dev/null +++ b/docs/ja/query_language/functions/bitmap_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/bitmap_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/comparison_functions.md b/docs/ja/query_language/functions/comparison_functions.md new file mode 120000 index 00000000000..417c589867c --- /dev/null +++ b/docs/ja/query_language/functions/comparison_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/comparison_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/conditional_functions.md b/docs/ja/query_language/functions/conditional_functions.md new file mode 120000 index 00000000000..ad0d775dbb5 --- /dev/null +++ b/docs/ja/query_language/functions/conditional_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/conditional_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/date_time_functions.md b/docs/ja/query_language/functions/date_time_functions.md new file mode 120000 index 00000000000..d11b9b8bb6b --- /dev/null +++ b/docs/ja/query_language/functions/date_time_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/date_time_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/encoding_functions.md b/docs/ja/query_language/functions/encoding_functions.md new file mode 120000 index 00000000000..b2e6be1405b --- /dev/null +++ b/docs/ja/query_language/functions/encoding_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/encoding_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/ext_dict_functions.md b/docs/ja/query_language/functions/ext_dict_functions.md new file mode 120000 index 00000000000..6318f900e4b --- /dev/null +++ b/docs/ja/query_language/functions/ext_dict_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/ext_dict_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/functions_for_nulls.md 
b/docs/ja/query_language/functions/functions_for_nulls.md new file mode 120000 index 00000000000..fa57e10ad15 --- /dev/null +++ b/docs/ja/query_language/functions/functions_for_nulls.md @@ -0,0 +1 @@ +../../../en/query_language/functions/functions_for_nulls.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/geo.md b/docs/ja/query_language/functions/geo.md new file mode 120000 index 00000000000..86fa3a85d34 --- /dev/null +++ b/docs/ja/query_language/functions/geo.md @@ -0,0 +1 @@ +../../../en/query_language/functions/geo.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/hash_functions.md b/docs/ja/query_language/functions/hash_functions.md new file mode 120000 index 00000000000..90de8ba97e7 --- /dev/null +++ b/docs/ja/query_language/functions/hash_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/hash_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/higher_order_functions.md b/docs/ja/query_language/functions/higher_order_functions.md new file mode 120000 index 00000000000..077feba2a3e --- /dev/null +++ b/docs/ja/query_language/functions/higher_order_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/higher_order_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/in_functions.md b/docs/ja/query_language/functions/in_functions.md new file mode 120000 index 00000000000..3ae5f24dbca --- /dev/null +++ b/docs/ja/query_language/functions/in_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/in_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/index.md b/docs/ja/query_language/functions/index.md new file mode 120000 index 00000000000..a4e9d619cc0 --- /dev/null +++ b/docs/ja/query_language/functions/index.md @@ -0,0 +1 @@ +../../../en/query_language/functions/index.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/introspection.md b/docs/ja/query_language/functions/introspection.md new file mode 120000 index 00000000000..b1a487e9c77 --- /dev/null +++ b/docs/ja/query_language/functions/introspection.md @@ -0,0 +1 @@ +../../../en/query_language/functions/introspection.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/ip_address_functions.md b/docs/ja/query_language/functions/ip_address_functions.md new file mode 120000 index 00000000000..b58175a7cdf --- /dev/null +++ b/docs/ja/query_language/functions/ip_address_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/ip_address_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/json_functions.md b/docs/ja/query_language/functions/json_functions.md new file mode 120000 index 00000000000..1b37184e006 --- /dev/null +++ b/docs/ja/query_language/functions/json_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/json_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/logical_functions.md b/docs/ja/query_language/functions/logical_functions.md new file mode 120000 index 00000000000..32015440e09 --- /dev/null +++ b/docs/ja/query_language/functions/logical_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/logical_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/machine_learning_functions.md b/docs/ja/query_language/functions/machine_learning_functions.md new file mode 120000 index 00000000000..4509602717e --- /dev/null +++ 
b/docs/ja/query_language/functions/machine_learning_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/machine_learning_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/math_functions.md b/docs/ja/query_language/functions/math_functions.md new file mode 120000 index 00000000000..e01674eca4d --- /dev/null +++ b/docs/ja/query_language/functions/math_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/math_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/other_functions.md b/docs/ja/query_language/functions/other_functions.md new file mode 120000 index 00000000000..65164784ced --- /dev/null +++ b/docs/ja/query_language/functions/other_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/other_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/random_functions.md b/docs/ja/query_language/functions/random_functions.md new file mode 120000 index 00000000000..b873e0c86ac --- /dev/null +++ b/docs/ja/query_language/functions/random_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/random_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/rounding_functions.md b/docs/ja/query_language/functions/rounding_functions.md new file mode 120000 index 00000000000..e1217e3b25a --- /dev/null +++ b/docs/ja/query_language/functions/rounding_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/rounding_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/splitting_merging_functions.md b/docs/ja/query_language/functions/splitting_merging_functions.md new file mode 120000 index 00000000000..5f8771abdec --- /dev/null +++ b/docs/ja/query_language/functions/splitting_merging_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/splitting_merging_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/string_functions.md b/docs/ja/query_language/functions/string_functions.md new file mode 120000 index 00000000000..cc4104aaf53 --- /dev/null +++ b/docs/ja/query_language/functions/string_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/string_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/string_replace_functions.md b/docs/ja/query_language/functions/string_replace_functions.md new file mode 120000 index 00000000000..4ec963ffd0f --- /dev/null +++ b/docs/ja/query_language/functions/string_replace_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/string_replace_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/string_search_functions.md b/docs/ja/query_language/functions/string_search_functions.md new file mode 120000 index 00000000000..0a2c7f4c4f1 --- /dev/null +++ b/docs/ja/query_language/functions/string_search_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/string_search_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/type_conversion_functions.md b/docs/ja/query_language/functions/type_conversion_functions.md new file mode 120000 index 00000000000..fcf51570d15 --- /dev/null +++ b/docs/ja/query_language/functions/type_conversion_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/type_conversion_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/url_functions.md b/docs/ja/query_language/functions/url_functions.md new file mode 
120000 index 00000000000..529e4ffdd53 --- /dev/null +++ b/docs/ja/query_language/functions/url_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/url_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/uuid_functions.md b/docs/ja/query_language/functions/uuid_functions.md new file mode 120000 index 00000000000..95e3ded0477 --- /dev/null +++ b/docs/ja/query_language/functions/uuid_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/uuid_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/functions/ym_dict_functions.md b/docs/ja/query_language/functions/ym_dict_functions.md new file mode 120000 index 00000000000..ec5ddc84479 --- /dev/null +++ b/docs/ja/query_language/functions/ym_dict_functions.md @@ -0,0 +1 @@ +../../../en/query_language/functions/ym_dict_functions.md \ No newline at end of file diff --git a/docs/ja/query_language/index.md b/docs/ja/query_language/index.md new file mode 120000 index 00000000000..44dfff9bb18 --- /dev/null +++ b/docs/ja/query_language/index.md @@ -0,0 +1 @@ +../../en/query_language/index.md \ No newline at end of file diff --git a/docs/ja/query_language/insert_into.md b/docs/ja/query_language/insert_into.md new file mode 120000 index 00000000000..29b47662b0d --- /dev/null +++ b/docs/ja/query_language/insert_into.md @@ -0,0 +1 @@ +../../en/query_language/insert_into.md \ No newline at end of file diff --git a/docs/ja/query_language/misc.md b/docs/ja/query_language/misc.md new file mode 120000 index 00000000000..3bd814f3568 --- /dev/null +++ b/docs/ja/query_language/misc.md @@ -0,0 +1 @@ +../../en/query_language/misc.md \ No newline at end of file diff --git a/docs/ja/query_language/operators.md b/docs/ja/query_language/operators.md new file mode 120000 index 00000000000..f94df928a82 --- /dev/null +++ b/docs/ja/query_language/operators.md @@ -0,0 +1 @@ +../../en/query_language/operators.md \ No newline at end of file diff --git a/docs/ja/query_language/select.md b/docs/ja/query_language/select.md new file mode 120000 index 00000000000..c8ec8369383 --- /dev/null +++ b/docs/ja/query_language/select.md @@ -0,0 +1 @@ +../../en/query_language/select.md \ No newline at end of file diff --git a/docs/ja/query_language/show.md b/docs/ja/query_language/show.md new file mode 120000 index 00000000000..4c2f4cf2c4f --- /dev/null +++ b/docs/ja/query_language/show.md @@ -0,0 +1 @@ +../../en/query_language/show.md \ No newline at end of file diff --git a/docs/ja/query_language/syntax.md b/docs/ja/query_language/syntax.md new file mode 120000 index 00000000000..5307fd51ae8 --- /dev/null +++ b/docs/ja/query_language/syntax.md @@ -0,0 +1 @@ +../../en/query_language/syntax.md \ No newline at end of file diff --git a/docs/ja/query_language/system.md b/docs/ja/query_language/system.md new file mode 120000 index 00000000000..6061858c3f2 --- /dev/null +++ b/docs/ja/query_language/system.md @@ -0,0 +1 @@ +../../en/query_language/system.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/file.md b/docs/ja/query_language/table_functions/file.md new file mode 120000 index 00000000000..a514547109a --- /dev/null +++ b/docs/ja/query_language/table_functions/file.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/file.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/hdfs.md b/docs/ja/query_language/table_functions/hdfs.md new file mode 120000 index 00000000000..2616e737eb6 --- /dev/null +++ 
b/docs/ja/query_language/table_functions/hdfs.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/hdfs.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/index.md b/docs/ja/query_language/table_functions/index.md new file mode 120000 index 00000000000..89b22522859 --- /dev/null +++ b/docs/ja/query_language/table_functions/index.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/index.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/input.md b/docs/ja/query_language/table_functions/input.md new file mode 120000 index 00000000000..f23cc8ee673 --- /dev/null +++ b/docs/ja/query_language/table_functions/input.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/input.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/jdbc.md b/docs/ja/query_language/table_functions/jdbc.md new file mode 120000 index 00000000000..73bec80ca58 --- /dev/null +++ b/docs/ja/query_language/table_functions/jdbc.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/jdbc.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/merge.md b/docs/ja/query_language/table_functions/merge.md new file mode 120000 index 00000000000..383f6c88331 --- /dev/null +++ b/docs/ja/query_language/table_functions/merge.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/merge.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/mysql.md b/docs/ja/query_language/table_functions/mysql.md new file mode 120000 index 00000000000..75c032cc63f --- /dev/null +++ b/docs/ja/query_language/table_functions/mysql.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/mysql.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/numbers.md b/docs/ja/query_language/table_functions/numbers.md new file mode 120000 index 00000000000..a679b915669 --- /dev/null +++ b/docs/ja/query_language/table_functions/numbers.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/numbers.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/odbc.md b/docs/ja/query_language/table_functions/odbc.md new file mode 120000 index 00000000000..7620f920494 --- /dev/null +++ b/docs/ja/query_language/table_functions/odbc.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/odbc.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/remote.md b/docs/ja/query_language/table_functions/remote.md new file mode 120000 index 00000000000..b157c4076d3 --- /dev/null +++ b/docs/ja/query_language/table_functions/remote.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/remote.md \ No newline at end of file diff --git a/docs/ja/query_language/table_functions/url.md b/docs/ja/query_language/table_functions/url.md new file mode 120000 index 00000000000..038e08f7ba9 --- /dev/null +++ b/docs/ja/query_language/table_functions/url.md @@ -0,0 +1 @@ +../../../en/query_language/table_functions/url.md \ No newline at end of file diff --git a/docs/ja/roadmap.md b/docs/ja/roadmap.md new file mode 120000 index 00000000000..24df86352b3 --- /dev/null +++ b/docs/ja/roadmap.md @@ -0,0 +1 @@ +../en/roadmap.md \ No newline at end of file diff --git a/docs/ja/security_changelog.md b/docs/ja/security_changelog.md new file mode 120000 index 00000000000..101a4f4e48c --- /dev/null +++ b/docs/ja/security_changelog.md @@ -0,0 +1 @@ +../en/security_changelog.md \ No newline at end of file diff --git 
a/docs/redirects.txt b/docs/redirects.txt index 0ff077b660c..b38f6d242f2 100644
--- a/docs/redirects.txt
+++ b/docs/redirects.txt
@@ -1,3 +1,4 @@
+introduction/ya_metrika_task.md introduction/history.md
 system_tables.md operations/system_tables.md
 system_tables/system.asynchronous_metrics.md operations/system_tables.md
 system_tables/system.clusters.md operations/system_tables.md
diff --git a/docs/ru/data_types/special_data_types/interval.md b/docs/ru/data_types/special_data_types/interval.md
new file mode 100644
index 00000000000..6762f9bc850
--- /dev/null
+++ b/docs/ru/data_types/special_data_types/interval.md
@@ -0,0 +1,74 @@
+# Interval {#data-type-interval}
+
+A family of data types representing date and time intervals. The [INTERVAL](../../query_language/operators.md#operator-interval) operator returns values of these types.
+
+!!! warning "Warning"
+    `Interval` data types cannot be used to store data in tables.
+
+Structure:
+
+- A time interval as a positive integer.
+- The type of the interval.
+
+Supported interval types:
+
+- `SECOND`
+- `MINUTE`
+- `HOUR`
+- `DAY`
+- `WEEK`
+- `MONTH`
+- `QUARTER`
+- `YEAR`
+
+Each interval type has its own data type. For example, the `IntervalDay` data type corresponds to the `DAY` interval:
+
+```sql
+SELECT toTypeName(INTERVAL 4 DAY)
+```
+```text
+┌─toTypeName(toIntervalDay(4))─┐
+│ IntervalDay                  │
+└──────────────────────────────┘
+```
+
+## Usage {#data-type-interval-usage-remarks}
+
+Values of the `Interval` types can be used in arithmetic operations with values of the [Date](../../data_types/date.md) and [DateTime](../../data_types/datetime.md) types. For example, you can add 4 days to the current date and time:
+
+```sql
+SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY
+```
+```text
+┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐
+│ 2019-10-23 10:58:45 │           2019-10-27 10:58:45 │
+└─────────────────────┴───────────────────────────────┘
+```
+
+Intervals of different types cannot be combined: you cannot use an interval like `4 DAY 1 HOUR`. Instead, express the interval in units smaller than or equal to the smallest unit of the interval; for example, the interval "1 day and 1 hour" can be expressed as `25 HOUR` or `90000 SECOND`.
+
+Arithmetic operations on `Interval` values are not available, but you can add intervals of different types one after another to `Date` and `DateTime` values. For example:
+
+```sql
+SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR
+```
+```text
+┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐
+│ 2019-10-23 11:16:28 │                                    2019-10-27 14:16:28 │
+└─────────────────────┴────────────────────────────────────────────────────────┘
+```
+
+The following query raises an exception:
+
+```sql
+select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR)
+```
+```text
+Received exception from server (version 19.14.1):
+Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime..
+```
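To make the workaround from the Usage section concrete: the failing query above succeeds once the combined span is expressed in its smallest single unit, here "4 days and 3 hours" as 99 hours. A minimal sketch; the output follows the same shape as the earlier examples.

```sql
-- "4 days and 3 hours" expressed as a single-type interval of 99 hours,
-- which avoids the "Wrong argument types" exception shown above.
SELECT now() AS current_date_time, current_date_time + INTERVAL 99 HOUR
```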
+
+## See Also
+
+- The [INTERVAL](../../query_language/operators.md#operator-interval) operator
+- The [toInterval](../../query_language/functions/type_conversion_functions.md#function-tointerval) type conversion functions
diff --git a/docs/ru/development/build_cross.md b/docs/ru/development/build_cross.md
deleted file mode 120000
index f595f252de3..00000000000
--- a/docs/ru/development/build_cross.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/development/build_cross.md
\ No newline at end of file
diff --git a/docs/ru/development/build_cross_osx.md b/docs/ru/development/build_cross_osx.md
new file mode 120000
index 00000000000..72e64e8631f
--- /dev/null
+++ b/docs/ru/development/build_cross_osx.md
@@ -0,0 +1 @@
+../../en/development/build_cross_osx.md
\ No newline at end of file
diff --git a/docs/ru/extended_roadmap.md b/docs/ru/extended_roadmap.md
index 9a8297e41d4..ca5390a5213 100644
--- a/docs/ru/extended_roadmap.md
+++ b/docs/ru/extended_roadmap.md
@@ -52,11 +52,11 @@
 
 ### 1.8. Moving data between partitions based on TTL.
 
-Done by [Владимир Чеботарёв](https://github.com/excitoon), Altinity.
+Done by [Владимир Чеботарёв](https://github.com/excitoon), Altinity. December 2019.
 
 ### 1.9. Using TTL to thin out data.
 
-Queued.
+Will be done by Сорокин Николай, HSE and Yandex.
 
 Currently the user can define an expression on a table that determines how long the data is kept. This expression is usually given relative to the value of a date column - for example: delete the data after three months. https://clickhouse.yandex/docs/ru/operations/table_engines/mergetree/#table_engine-mergetree-ttl
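Item 1.9 extends the existing table-level TTL linked above. A minimal sketch of that existing mechanism, with hypothetical table and column names:

```sql
-- Hypothetical table: rows whose event_date is more than three months old
-- are deleted in the background by the TTL mechanism item 1.9 builds on.
CREATE TABLE events
(
    event_date Date,
    user_id UInt64,
    payload String
)
ENGINE = MergeTree
ORDER BY (event_date, user_id)
TTL event_date + INTERVAL 3 MONTH;
```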
@@ -160,7 +160,7 @@ ClickHouse использует небольшое подмножество фу
 
 ### 2.9. Format-style logging.
 
-[Александр Кузьменков](https://github.com/akuzm) is interested in this task. No progress.
+Being done by [Иван Лежанкин](https://github.com/abyss7). Low priority.
 
 ### 2.10. Requesting slices from tables rather than whole columns.
 
@@ -203,9 +203,9 @@
 [Иван Блинков](https://github.com/blinkov/) is a very good person. The documentation site itself is built on technologies that do not satisfy the task's requirements, and these technologies are hard to fix.
 
-### 3.4. Add Japanese to the documentation.
+### 3.4. + Add Japanese to the documentation.
 
-This task will be done by [Иван Блинков](https://github.com/blinkov/) by the end of November 2019.
+This task will be done by [Иван Блинков](https://github.com/blinkov/) by the end of December 2019. Done.
 
 ## 4. Networking.
 
@@ -257,7 +257,7 @@
 
 ### 5.3. A built-in endpoint for Prometheus and, possibly, Solomon.
 
-A simple task.
+A simple task. https://github.com/Vdimir
 
 ### 5.4. Opt-in notification in the client when a new version has been released.
 
@@ -270,7 +270,8 @@
 
 ### 6.1. Fixes to the sampling query profiler.
 
-Михаил Филимонов, Altinity. November 2019.
+Михаил Филимонов, Altinity. November 2019. Done.
+It remains to check that the profiler works in the first thread (which matters for INSERT).
 
 ### 6.2. Adding a memory profiler.
 
@@ -364,6 +365,7 @@ UBSan включен в функциональных тестах, но не в
 We use -Wall -Wextra -Weverything -Werror. When building with clang, -Weverything is already enabled. But gcc has unique warnings that are absent in clang.
 Wolf Kreuzerkrieg. He may no longer be interested in this task.
+Low priority. May be cancelled.
 
 ### 7.14. An alternative to readline and libedit.
 
@@ -464,14 +466,14 @@ Fuzzing тестирование - это тестирование случай
 1. Adding to the ClickHouse SQL dialect functions for generating random data (for example, random binary strings of a given length, random valid UTF-8 strings) and for "corrupting" data (for example, flipping the values of random bits with a given frequency). This will be used to test ClickHouse SQL functions.
 
-Functions that could be added:
-`randomString(length)`
-`randomFixedString(length)`
- - a string of the given length with uniformly distributed random bytes;
-`randomStringASCII(length)`
+Functions that could be added:
+`randomString(length)`
+`randomFixedString(length)`
+ - a string of the given length with uniformly distributed random bytes;
+`randomStringASCII(length)`
 `randomStringUTF8(length)`
-`fuzzBits(s, inverse_probability)` - flip each bit of the string to its opposite with a given probability;
+`fuzzBits(s, inverse_probability)` - flip each bit of the string to its opposite with a given probability;
 `fuzzBytes(s, inverse_probability)` - change each byte of the string to a uniformly random one with a given probability;
 
 Each function takes an optional argument that prevents identical expressions in a query from being collapsed together.
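The generators above are proposals, not existing functions. The sketch below only illustrates how such calls might look if implemented with the listed signatures; the names, the second argument's semantics, and the output columns are all assumptions taken from the description.

```sql
-- Hypothetical: none of these functions exist yet; names and signatures
-- follow the proposal above.
SELECT
    randomStringUTF8(32) AS sample,
    fuzzBits(randomString(32), 10) AS bit_flipped,
    fuzzBytes(randomFixedString(32), 10) AS byte_noise
FROM numbers(10);
```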
Тем не менее, в Реализовать в ClickHouse типы данных для задач обработки геоинформационных данных: Point, Line, MultiLine, Polygon и операции над ними - проверка вхождения, пересечения. Вариантом минимум будет реализация этих операций в евклидовой системе координат. Дополнительно - на сфере и WGS84. -### 17.3. Ускорение greatCircleDistance. +### 17.3. + Ускорение greatCircleDistance. [Ольга Хвостикова](https://github.com/stavrolia), основано на коде Андрея Аксёнова, получено разрешение на использование кода. @@ -1050,9 +1058,9 @@ Hold. Полезно для заказчиков внутри Яндекса, н ## 21. Оптимизации производительности. -### 21.1. Параллельный парсинг форматов. +### 21.1. + Параллельный парсинг форматов. -Начинал Олег Ершов, доделывает Никита Михайлов, помогает [Александр Кузьменков](https://github.com/akuzm). Почти всё готово. +Начинал Олег Ершов, доделывает Никита Михайлов, помогает [Александр Кузьменков](https://github.com/akuzm). Готово. ### 21.2. Параллельное форматирование форматов. @@ -1074,6 +1082,8 @@ Hold. Полезно для заказчиков внутри Яндекса, н ### 21.5. Распараллеливание INSERT при INSERT SELECT, если это необходимо. +[Vxider](https://github.com/Vxider), ICT + ### 21.6. Уменьшение числа потоков для SELECT в случае тривиального INSERT SELECT. ### 21.7. Кэш результатов запросов. @@ -1098,7 +1108,7 @@ Hold. Полезно для заказчиков внутри Яндекса, н [Николай Кочетов](https://github.com/KochetovNicolai). Требует 2.1. -### 21.10. Улучшение эвристики PREWHERE. +### 21.10. + Улучшение эвристики PREWHERE. Amos Bird. @@ -1197,9 +1207,9 @@ zhang2014. ## 22. Долги и недоделанные возможности. -### 22.1. Исправление неработающих таймаутов, если используется TLS. +### 22.1. + Исправление неработающих таймаутов, если используется TLS. -Сейчас смотрит [Александр Сапин](https://github.com/alesapin), но он может делегировать задачу кому угодно. Нужно для Яндекс.Облака. +Нужно для Яндекс.Облака. Сделал Алексей Миловидов. ### 22.2. Убрать возможность изменить настройки в native протоколе в случае readonly. @@ -1235,31 +1245,36 @@ zhang2014. Требует 6.3., но можно улучшить отдельными хаками. Нужно Метрике и БК. -### 22.11. Более простая ser/de настроек запросов. +### 22.11. + Более простая ser/de настроек запросов. -[Виталий Баранов](https://github.com/vitlibar), почти всё готово. +И пропуск неизвестных настроек. Важно для Метрики для упрощения апгрейда без изменения конфига. +[Виталий Баранов](https://github.com/vitlibar), готово. -### 22.12. Исправление низкой производительности чтения из Kafka. - -[Иван Лежанкин](https://github.com/abyss7). +### 22.12. + Исправление низкой производительности чтения из Kafka. Для ClickHouse нехарактерно наличие кода, обладающего столь низкой производительностью. Практики разработки не подразумевают, что такой код должен попасть в продакшен без надлежащего тестирования производительности. -### 22.13. Посмотреть, почему не работают некоторые collations. +Изначально было назначено на [Ивана Лежанкина](https://github.com/abyss7), но по неизвестной причине было не сделано в течение нескольких месяцев. +Сделал Михаил Филимонов, Altinity. -[Иван Лежанкин](https://github.com/abyss7), совмещается с 7.1. +### 22.13. + Посмотреть, почему не работают некоторые collations. + +Изначально было назначено на [Ивана Лежанкина](https://github.com/abyss7), но в результате сделал Александр Сапин. ### 22.14. Посмотреть, почему не работает StorageSet для MergeTree таблиц при некоторых условиях. +Вроде бы сделал Никита Михайлов - проверить существующие issues на эту тему. + + ### 22.15. 
Нормализация коммитов в Kafka и идемпотентности операций.
 
 [Иван Лежанкин](https://github.com/abyss7), если он не сдастся.
 
 ### 22.16. Исправление низкой производительности кодека DoubleDelta.
 
-Василий Немков, Altinity - временно приостановлено, но намерения остаются в силе.
+Василий Немков, Altinity - в процессе.
 
-Мы считаем важным, что код в ClickHouse содержит разумные оптимизации, основанные на анализе производительности. Но иногда бывают досадные исключения.
+Мы считаем важным, что код в ClickHouse содержит разумные оптимизации, основанные на анализе производительности. Но иногда бывают досадные исключения.
 
 ### 22.17. Консистентно работающий POPULATE для MaterializedView.
 
@@ -1267,11 +1282,11 @@ zhang2014.
 
 Василий Немков, Altinity.
 
-### 22.19. Одновременное использование SAMPLE и PREWHERE.
+### 22.19. + Одновременное использование SAMPLE и PREWHERE.
 
 Нужно для Метрики. [Николай Кочетов](https://github.com/KochetovNicolai), ноябрь 2019.
 
-### 22.20. Неправильная работа PREWHERE при некоторых условиях.
+### 22.20. + Неправильная работа PREWHERE при некоторых условиях.
 
 [Николай Кочетов](https://github.com/KochetovNicolai), декабрь 2019.
 
@@ -1283,6 +1298,9 @@ zhang2014.
 
 После 10.14.
 
+https://github.com/ClickHouse/ClickHouse/issues/7237
+https://github.com/ClickHouse/ClickHouse/issues/2655
+
 ### 22.23. Правильная обработка Nullable в функциях, которые кидают исключение на default значении: modulo, intDiv.
 
 ### 22.24. Излишняя фильтрация ODBC connection string.
@@ -1305,6 +1323,10 @@ zhang2014.
 
 [Иван Лежанкин](https://github.com/abyss7).
 
+### 22.29. Уязвимость DDL для словарей executable.
+
+[Александр Сапин](https://github.com/alesapin)
+
 
 ## 23. Default Festival.
 
@@ -1577,13 +1599,14 @@ Amos Bird, но его решение слишком громоздкое и п
 
 ## 25. DevRel
 
-### 25.1. Перевод инструкции для начинающих разработчиков.
+### 25.1. + Перевод инструкции для начинающих разработчиков.
 
 Александр Казаков, ноябрь 2019.
 
 ### 25.2. Вычитка и выкладка статьи про обфускацию данных на английском.
 
 Эми, Александр Казаков, Алексей Миловидов, ноябрь 2019.
+Готово к выкладке.
 
 ### 25.3. Подготовка статьи "Секреты оптимизации производительности ClickHouse".
 
@@ -1603,7 +1626,7 @@ Amos Bird, но его решение слишком громоздкое и п
 
 Эми
 
-### 25.8. Выступление keynote на BDTC.
+### 25.8. + Выступление keynote на BDTC.
 
 Алексей Миловидов
diff --git a/docs/ru/getting_started/index.md b/docs/ru/getting_started/index.md
index c03ac58f24b..a8d0fbaa5b1 100644
--- a/docs/ru/getting_started/index.md
+++ b/docs/ru/getting_started/index.md
@@ -1,138 +1,10 @@
 # Начало работы
 
-## Системные требования
+Если вы новичок в ClickHouse и хотите вживую оценить его производительность, прежде всего нужно пройти через [процесс установки](install.md).
 
-ClickHouse может работать на любом Linux, FreeBSD или Mac OS X с архитектурой процессора x86\_64.
+После этого можно выбрать один из следующих вариантов:
 
-Хотя предсобранные релизы обычно компилируются с использованием набора инструкций SSE 4.2, что добавляет использование поддерживающего его процессора в список системных требований. Команда для проверки наличия поддержки инструкций SSE 4.2 на текущем процессоре:
-
-```bash
-$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
-```
-
-## Установка
-
-### Из DEB пакетов
-
-Яндекс рекомендует использовать официальные скомпилированные `deb` пакеты для Debian или Ubuntu.
- -Чтобы установить официальные пакеты, пропишите репозиторий Яндекса в `/etc/apt/sources.list` или в отдельный файл `/etc/apt/sources.list.d/clickhouse.list`: - -```bash -$ deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ -``` - -Если вы хотите использовать наиболее свежую тестовую, замените `stable` на `testing` (не рекомендуется для production окружений). - -Затем для самой установки пакетов выполните: - -```bash -$ sudo apt-get install dirmngr # optional -$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional -$ sudo apt-get update -$ sudo apt-get install clickhouse-client clickhouse-server -``` - -Также эти пакеты можно скачать и установить вручную отсюда: . - -### Из RPM пакетов - -Команда ClickHouse в Яндексе рекомендует использовать официальные предкомпилированные `rpm` пакеты для CentOS, RedHad и всех остальных дистрибутивов Linux, основанных на rpm. - -Сначала нужно подключить официальный репозиторий: -```bash -$ sudo yum install yum-utils -$ sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG -$ sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64 -``` - -Для использования наиболее свежих версий нужно заменить `stable` на `testing` (рекомендуется для тестовых окружений). - -Then run these commands to actually install packages: -Для, собственно, установки пакетов необходимо выполнить следующие команды: - -```bash -$ sudo yum install clickhouse-server clickhouse-client -``` - -Также есть возможность установить пакеты вручную, скачав отсюда: . - -### Из Docker образа - -Для запуска ClickHouse в Docker нужно следовать инструкции на [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Внутри образов используются официальные `deb` пакеты. - -### Из исходного кода - -Для компиляции ClickHouse вручную, используйте инструкцию для [Linux](../development/build.md) или [Mac OS X](../development/build_osx.md). - -Можно скомпилировать пакеты и установить их, либо использовать программы без установки пакетов. Также при ручной сборке можно отключить необходимость поддержки набора инструкций SSE 4.2 или собрать под процессоры архитектуры AArch64. - -```text -Client: dbms/programs/clickhouse-client -Server: dbms/programs/clickhouse-server -``` - -Для работы собранного вручную сервера необходимо создать директории для данных и метаданных, а также сделать их `chown` для желаемого пользователя. Пути к этим директориям могут быть изменены в конфигурационном файле сервера (src/dbms/programs/server/config.xml), по умолчанию используются следующие: - -```text -/opt/clickhouse/data/default/ -/opt/clickhouse/metadata/default/ -``` - -На Gentoo для установки ClickHouse из исходного кода можно использовать просто `emerge clickhouse`. - -## Запуск - -Для запуска сервера в качестве демона, выполните: - -```bash -$ sudo service clickhouse-server start -``` - -Смотрите логи в директории `/var/log/clickhouse-server/`. - -Если сервер не стартует, проверьте корректность конфигурации в файле `/etc/clickhouse-server/config.xml` - -Также можно запустить сервер вручную из консоли: - -```bash -$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml -``` - -При этом, лог будет выводиться в консоль, что удобно для разработки. -Если конфигурационный файл лежит в текущей директории, то указывать параметр `--config-file` не требуется, по умолчанию будет использован файл `./config.xml`. 
-
-После запуска сервера, соединиться с ним можно с помощью клиента командной строки:
-
-```bash
-$ clickhouse-client
-```
-
-По умолчанию он соединяется с localhost:9000, от имени пользователя `default` без пароля. Также клиент может быть использован для соединения с удалённым сервером с помощью аргумента `--host`.
-
-Терминал должен использовать кодировку UTF-8.
-
-Более подробная информация о клиенте располагается в разделе [«Клиент командной строки»](../interfaces/cli.md).
-
-Пример проверки работоспособности системы:
-
-```bash
-$ ./clickhouse-client
-ClickHouse client version 0.0.18749.
-Connecting to localhost:9000.
-Connected to ClickHouse server version 0.0.18749.
-```
-```sql
-SELECT 1
-```
-```text
-┌─1─┐
-│ 1 │
-└───┘
-```
-
-**Поздравляем, система работает!**
-
-Для дальнейших экспериментов можно попробовать загрузить один из тестовых наборов данных или пройти [пошаговое руководство для начинающих](https://clickhouse.yandex/tutorial.html).
+* [Пройти подробное руководство для начинающих](tutorial.md)
+* [Поэкспериментировать с тестовыми наборами данных](example_datasets/ontime.md)
 
 [Оригинальная статья](https://clickhouse.yandex/docs/ru/getting_started/)
diff --git a/docs/ru/getting_started/install.md b/docs/ru/getting_started/install.md
new file mode 100644
index 00000000000..29ccd2b14f4
--- /dev/null
+++ b/docs/ru/getting_started/install.md
@@ -0,0 +1,144 @@
+# Установка
+
+## Системные требования
+
+ClickHouse может работать на любой операционной системе Linux, FreeBSD или Mac OS X с архитектурой процессора x86\_64, AArch64 или PowerPC64LE.
+
+Предварительно собранные пакеты компилируются для x86\_64 и используют набор инструкций SSE 4.2, поэтому, если не указано иное, его поддержка в используемом процессоре становится дополнительным требованием к системе. Вот команда, чтобы проверить, поддерживает ли текущий процессор SSE 4.2:
+
+``` bash
+$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
+```
+
+Чтобы запустить ClickHouse на процессорах, которые не поддерживают SSE 4.2, либо имеют архитектуру AArch64 или PowerPC64LE, необходимо самостоятельно [собрать ClickHouse из исходного кода](#from-sources) с соответствующими настройками конфигурации.
+
+## Доступные варианты установки
+
+### Из DEB пакетов {#from-deb-packages}
+
+Яндекс рекомендует использовать официальные скомпилированные `deb` пакеты для Debian или Ubuntu.
+
+Чтобы установить официальные пакеты, пропишите репозиторий Яндекса в `/etc/apt/sources.list` или в отдельный файл `/etc/apt/sources.list.d/clickhouse.list`:
+
+```
+deb http://repo.yandex.ru/clickhouse/deb/stable/ main/
+```
+
+Если вы хотите использовать наиболее свежую тестовую версию, замените `stable` на `testing` (не рекомендуется для production окружений).
+
+Затем для самой установки пакетов выполните:
+
+```bash
+sudo apt-get install dirmngr # optional
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional
+sudo apt-get update
+sudo apt-get install clickhouse-client clickhouse-server
+```
+
+Также эти пакеты можно скачать и установить вручную отсюда: <https://repo.yandex.ru/clickhouse/deb/stable/main/>.
+
+### Из RPM пакетов {#from-rpm-packages}
+
+Команда ClickHouse в Яндексе рекомендует использовать официальные предкомпилированные `rpm` пакеты для CentOS, RedHat и всех остальных дистрибутивов Linux, основанных на rpm.
+
+Сначала нужно подключить официальный репозиторий:
+
+```bash
+sudo yum install yum-utils
+sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG
+sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64
+```
+
+Для использования наиболее свежих версий нужно заменить `stable` на `testing` (рекомендуется для тестовых окружений).
+
+Для установки пакетов необходимо выполнить следующие команды:
+
+```bash
+sudo yum install clickhouse-server clickhouse-client
+```
+
+Также есть возможность установить пакеты вручную, скачав отсюда: <https://repo.yandex.ru/clickhouse/rpm/stable/x86_64>.
+
+### Из Docker образа {#from-docker-image}
+
+Для запуска ClickHouse в Docker нужно следовать инструкции на [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Внутри образов используются официальные `deb` пакеты.
+
+### Из исходного кода {#from-sources}
+
+Для компиляции ClickHouse вручную используйте инструкцию для [Linux](../development/build.md) или [Mac OS X](../development/build_osx.md).
+
+Можно скомпилировать пакеты и установить их, либо использовать программы без установки пакетов. Также при ручной сборке можно отключить необходимость поддержки набора инструкций SSE 4.2 или собрать под процессоры архитектуры AArch64.
+
+```
+Client: dbms/programs/clickhouse-client
+Server: dbms/programs/clickhouse-server
+```
+
+Для работы собранного вручную сервера необходимо создать директории для данных и метаданных, а также сделать их `chown` для желаемого пользователя. Пути к этим директориям могут быть изменены в конфигурационном файле сервера (src/dbms/programs/server/config.xml), по умолчанию используются следующие:
+
+```
+/opt/clickhouse/data/default/
+/opt/clickhouse/metadata/default/
+```
+
+На Gentoo для установки ClickHouse из исходного кода можно использовать просто `emerge clickhouse`.
+
+## Запуск
+
+Для запуска сервера в качестве демона, выполните:
+
+``` bash
+$ sudo service clickhouse-server start
+```
+
+Смотрите логи в директории `/var/log/clickhouse-server/`.
+
+Если сервер не стартует, проверьте корректность конфигурации в файле `/etc/clickhouse-server/config.xml`
+
+Также можно запустить сервер вручную из консоли:
+
+``` bash
+$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
+```
+
+При этом, лог будет выводиться в консоль, что удобно для разработки.
+Если конфигурационный файл лежит в текущей директории, то указывать параметр `--config-file` не требуется, по умолчанию будет использован файл `./config.xml`.
+
+После запуска сервера, соединиться с ним можно с помощью клиента командной строки:
+
+``` bash
+$ clickhouse-client
+```
+
+По умолчанию он соединяется с localhost:9000, от имени пользователя `default` без пароля. Также клиент может быть использован для соединения с удалённым сервером с помощью аргумента `--host`.
+
+Терминал должен использовать кодировку UTF-8.
+
+Более подробная информация о клиенте располагается в разделе [«Клиент командной строки»](../interfaces/cli.md).
+
+Пример проверки работоспособности системы:
+
+``` bash
+$ ./clickhouse-client
+ClickHouse client version 0.0.18749.
+Connecting to localhost:9000.
+Connected to ClickHouse server version 0.0.18749.
+
+:) SELECT 1
+
+SELECT 1
+
+┌─1─┐
+│ 1 │
+└───┘
+
+1 rows in set. Elapsed: 0.003 sec.
+
+:)
+```
+
+**Поздравляем, система работает!**
+
+Для дальнейших экспериментов можно попробовать загрузить один из тестовых наборов данных или пройти [пошаговое руководство для начинающих](https://clickhouse.yandex/tutorial.html).
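Если хочется сразу поэкспериментировать с собственными данными, ниже приведён минимальный набросок первых шагов в `clickhouse-client` (имена базы `demo` и таблицы `demo.events` условные, взяты только для иллюстрации):

```sql
-- создаём базу и таблицу на движке MergeTree
CREATE DATABASE IF NOT EXISTS demo;

CREATE TABLE demo.events
(
    ts DateTime,
    message String
)
ENGINE = MergeTree()
ORDER BY ts;

-- вставляем строку и читаем её обратно
INSERT INTO demo.events VALUES (now(), 'hello');

SELECT * FROM demo.events;
```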
+
+[Оригинальная статья](https://clickhouse.yandex/docs/ru/getting_started/install/)
diff --git a/docs/ru/getting_started/tutorial.md b/docs/ru/getting_started/tutorial.md
new file mode 120000
index 00000000000..8bc40816ab2
--- /dev/null
+++ b/docs/ru/getting_started/tutorial.md
@@ -0,0 +1 @@
+../../en/getting_started/tutorial.md
\ No newline at end of file
diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md
index f2040c4af1b..a67ae87f6ab 100644
--- a/docs/ru/interfaces/cli.md
+++ b/docs/ru/interfaces/cli.md
@@ -1,17 +1,23 @@
 # Клиент командной строки
 
-Для работы из командной строки вы можете использовать `clickhouse-client`:
+ClickHouse предоставляет собственный клиент командной строки: `clickhouse-client`. Клиент поддерживает запуск с аргументами командной строки и с конфигурационными файлами. Подробнее читайте в разделе [Конфигурирование](#interfaces_cli_configuration).
+
+Клиент [устанавливается](../getting_started/index.md) пакетом `clickhouse-client` и запускается командой `clickhouse-client`.
 
 ```bash
 $ clickhouse-client
-ClickHouse client version 0.0.26176.
-Connecting to localhost:9000.
-Connected to ClickHouse server version 0.0.26176.
+ClickHouse client version 19.17.1.1579 (official build).
+Connecting to localhost:9000 as user default.
+Connected to ClickHouse server version 19.17.1 revision 54428.
 
 :)
 ```
 
-Клиент поддерживает параметры командной строки и конфигурационные файлы. Подробнее читайте в разделе "[Конфигурирование](#interfaces_cli_configuration)".
+Клиенты и серверы различных версий совместимы, однако если клиент старее сервера, то некоторые новые функции могут быть недоступны. Мы рекомендуем использовать одинаковые версии клиента и сервера. При подключении клиента к более новому серверу `clickhouse-client` выводит сообщение:
+
+```
+ClickHouse client version is older than ClickHouse server. It may lack support for new features.
+```
 
 ## Использование {#cli_usage}
diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md
index c7c32a46a4c..4da101796f1 100644
--- a/docs/ru/interfaces/http.md
+++ b/docs/ru/interfaces/http.md
@@ -28,8 +28,12 @@ $ wget -O- -q 'http://localhost:8123/?query=SELECT 1'
 $ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123
 HTTP/1.0 200 OK
+Date: Wed, 27 Nov 2019 10:30:18 GMT
 Connection: Close
-Date: Fri, 16 Nov 2012 19:21:50 GMT
+Content-Type: text/tab-separated-values; charset=UTF-8
+X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
+X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
+X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
 
 1
 ```
diff --git a/docs/ru/introduction/ya_metrika_task.md b/docs/ru/introduction/history.md
similarity index 99%
rename from docs/ru/introduction/ya_metrika_task.md
rename to docs/ru/introduction/history.md
index c7e22346ae5..c0035b51f82 100644
--- a/docs/ru/introduction/ya_metrika_task.md
+++ b/docs/ru/introduction/history.md
@@ -1,4 +1,4 @@
-# Постановка задачи в Яндекс.Метрике
+# История ClickHouse
 
 ClickHouse изначально разрабатывался для обеспечения работы [Яндекс.Метрики](https://metrika.yandex.ru/), [второй крупнейшей в мире](http://w3techs.com/technologies/overview/traffic_analysis/all) платформы для веб аналитики, и продолжает быть её ключевым компонентом. При более 13 триллионах записей в базе данных и более 20 миллиардах событий в сутки, ClickHouse позволяет генерировать индивидуально настроенные отчёты на лету напрямую из неагрегированных данных.
Данная статья вкратце демонстрирует, какие цели исторически стояли перед ClickHouse на ранних этапах его развития.
diff --git a/docs/ru/operations/monitoring.md b/docs/ru/operations/monitoring.md
index da24c7e960b..248d478506b 100644
--- a/docs/ru/operations/monitoring.md
+++ b/docs/ru/operations/monitoring.md
@@ -34,4 +34,4 @@ ClickHouse собирает:
 
 Также, можно отслеживать доступность сервера через HTTP API. Отправьте `HTTP GET` к ресурсу `/`. Если сервер доступен, он отвечает `200 OK`.
 
-Для мониторинга серверов в кластерной конфигурации необходимо установить параметр [max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) и использовать HTTP ресурс `/replicas-delay`. Если реплика доступна и не отстаёт от других реплик, то запрос к `/replicas-delay` возвращает `200 OK`. Если реплика отстаёт, то она возвращает информацию о размере отставания.
+Для мониторинга серверов в кластерной конфигурации необходимо установить параметр [max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) и использовать HTTP ресурс `/replicas_status`. Если реплика доступна и не отстаёт от других реплик, то запрос к `/replicas_status` возвращает `200 OK`. Если реплика отстаёт, то она возвращает информацию о размере отставания.
diff --git a/docs/ru/operations/server_settings/settings.md b/docs/ru/operations/server_settings/settings.md
index aca2fed8063..ca1c255bee3 100644
--- a/docs/ru/operations/server_settings/settings.md
+++ b/docs/ru/operations/server_settings/settings.md
@@ -580,6 +580,33 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat
 ```
 
+## query_thread_log {#server_settings-query-thread-log}
+
+Настройка логирования потоков выполнения запросов, принятых с настройкой [log_query_threads=1](../settings/settings.md#settings-log-query-threads).
+
+Запросы логируются не в отдельный файл, а в системную таблицу [system.query_thread_log](../system_tables.md#system_tables-query-thread-log). Вы можете изменить название этой таблицы в параметре `table` (см. ниже).
+
+При настройке логирования используются следующие параметры:
+
+- `database` — имя базы данных;
+- `table` — имя таблицы, куда будет записываться лог;
+- `partition_by` — [произвольный ключ партиционирования](../../operations/table_engines/custom_partitioning_key.md) для таблицы с логами;
+- `flush_interval_milliseconds` — период сброса данных из буфера в памяти в таблицу.
+
+Если таблица не существует, то ClickHouse создаст её. Если структура журнала запросов изменилась при обновлении сервера ClickHouse, то таблица со старой структурой переименовывается, а новая таблица создается автоматически.
+
+**Пример**
+
+```xml
+<query_thread_log>
+    <database>system</database>
+    <table>query_thread_log</table>
+    <partition_by>toMonday(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</query_thread_log>
+```
+
+
 ## remote_servers {#server_settings_remote_servers}
 
 Конфигурация кластеров, которые использует движок таблиц [Distributed](../../operations/table_engines/distributed.md) и табличная функция `cluster`.
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index 81cff172f98..e0045bd42ef 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -488,6 +488,16 @@ ClickHouse использует этот параметр при чтении д
 
     log_queries=1
 
+## log_query_threads {#settings-log-query-threads}
+
+Установка логирования информации о потоках выполнения запроса.
+
+Лог информации о потоках выполнения запросов, переданных в ClickHouse с этой установкой, записывается согласно правилам конфигурационного параметра сервера [query_thread_log](../server_settings/settings.md#server_settings-query-thread-log).
+
+**Пример**:
+
+    log_query_threads=1
+
 ## max_insert_block_size {#settings-max_insert_block_size}
 
 Формировать блоки указанного размера, при вставке в таблицу.
@@ -570,6 +580,12 @@ Default value: 10000 seconds.
 
 Значение по умолчанию: 10, 300, 300.
 
+## cancel_http_readonly_queries_on_client_close
+
+Отменяет HTTP readonly запросы (например, SELECT), когда клиент обрывает соединение до завершения получения данных.
+
+Значение по умолчанию: 0.
+
 ## poll_interval
 
 Блокироваться в цикле ожидания запроса в сервере на указанное количество секунд.
diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md
index fa6c80bfb05..0e4ffc8e056 100644
--- a/docs/ru/operations/system_tables.md
+++ b/docs/ru/operations/system_tables.md
@@ -410,8 +410,8 @@ ClickHouse создаёт таблицу только в том случае, к
 - `'QueryFinish' = 2` — успешное завершение выполнения запроса.
 - `'ExceptionBeforeStart' = 3` — исключение перед началом обработки запроса.
 - `'ExceptionWhileProcessing' = 4` — исключение во время обработки запроса.
-- `event_date` (Date) — дата события.
-- `event_time` (DateTime) — время события.
+- `event_date` (Date) — дата начала запроса.
+- `event_time` (DateTime) — время начала запроса.
 - `query_start_time` (DateTime) — время начала обработки запроса.
 - `query_duration_ms` (UInt64) — длительность обработки запроса.
 - `read_rows` (UInt64) — количество прочитанных строк.
@@ -421,43 +421,39 @@ ClickHouse создаёт таблицу только в том случае, к
 - `result_rows` (UInt64) — количество строк в результате.
 - `result_bytes` (UInt64) — объём результата в байтах.
 - `memory_usage` (UInt64) — потребление RAM запросом.
-- `query` (String) — строка запроса.
-- `exception` (String) — сообщение исключения.
+- `query` (String) — текст запроса.
+- `exception` (String) — сообщение исключения, если запрос завершился по исключению.
 - `stack_trace` (String) — трассировка (список функций, последовательно вызванных перед ошибкой). Пустая строка, если запрос успешно завершен.
 - `is_initial_query` (UInt8) — вид запроса. Возможные значения:
     - 1 — запрос был инициирован клиентом.
    - 0 — запрос был инициирован другим запросом при распределенном запросе.
 - `user` (String) — пользователь, запустивший текущий запрос.
 - `query_id` (String) — ID запроса.
-- `address` (FixedString(16)) — IP адрес, с которого пришел запрос.
-- `port` (UInt16) — порт, на котором сервер принял запрос.
+- `address` (IPv6) — IP адрес, с которого пришел запрос.
+- `port` (UInt16) — порт, с которого клиент сделал запрос.
 - `initial_user` (String) — пользователь, запустивший первоначальный запрос (для распределенных запросов).
 - `initial_query_id` (String) — ID родительского запроса.
-- `initial_address` (FixedString(16)) — IP адрес, с которого пришел родительский запрос.
-- `initial_port` (UInt16) — порт, на котором сервер принял родительский запрос от клиента.
+- `initial_address` (IPv6) — IP адрес, с которого пришел родительский запрос.
+- `initial_port` (UInt16) — порт, с которого клиент сделал родительский запрос.
 - `interface` (UInt8) — интерфейс, с которого ушёл запрос. Возможные значения:
     - 1 — TCP.
     - 2 — HTTP.
-- `os_user` (String) — операционная система пользователя.
-- `client_hostname` (String) — имя сервера, к которому присоединился [clickhouse-client](../interfaces/cli.md).
-- `client_name` (String) — [clickhouse-client](../interfaces/cli.md).
-- `client_revision` (UInt32) — ревизия [clickhouse-client](../interfaces/cli.md).
-- `client_version_major` (UInt32) — старшая версия [clickhouse-client](../interfaces/cli.md).
-- `client_version_minor` (UInt32) — младшая версия [clickhouse-client](../interfaces/cli.md).
-- `client_version_patch` (UInt32) — патч [clickhouse-client](../interfaces/cli.md).
+- `os_user` (String) — имя пользователя в OS, который запустил [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` (String) — имя сервера, с которого присоединился [clickhouse-client](../interfaces/cli.md) или другой TCP клиент.
+- `client_name` (String) — [clickhouse-client](../interfaces/cli.md) или другой TCP клиент.
+- `client_revision` (UInt32) — ревизия [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
+- `client_version_major` (UInt32) — старшая версия [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
+- `client_version_minor` (UInt32) — младшая версия [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
+- `client_version_patch` (UInt32) — патч [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
 - `http_method` (UInt8) — HTTP метод, инициировавший запрос. Возможные значения:
     - 0 — запрос запущен с интерфейса TCP.
     - 1 — `GET`.
     - 2 — `POST`.
 - `http_user_agent` (String) — HTTP заголовок `UserAgent`.
-- `quota_key` (String) — идентификатор квоты из настроек [квот](quotas.md).
+- `quota_key` (String) — "ключ квоты" из настроек [квот](quotas.md) (см. `keyed`).
 - `revision` (UInt32) — ревизия ClickHouse.
 - `thread_numbers` (Array(UInt32)) — количество потоков, участвующих в обработке запросов.
-- `ProfileEvents.Names` (Array(String)) — Счетчики для изменения метрик:
-    - Время, потраченное на чтение и запись по сети.
-    - Время, потраченное на чтение и запись на диск.
-    - Количество сетевых ошибок.
-    - Время, потраченное на ожидание, когда пропускная способность сети ограничена.
+- `ProfileEvents.Names` (Array(String)) — Счетчики для изменения различных метрик. Описание метрик можно получить из таблицы [system.events](#system_tables-events).
 - `ProfileEvents.Values` (Array(UInt64)) — метрики, перечисленные в столбце `ProfileEvents.Names`.
 - `Settings.Names` (Array(String)) — имена настроек, которые меняются, когда клиент выполняет запрос. Чтобы разрешить логирование изменений настроек, установите параметр `log_query_settings` равным 1.
 - `Settings.Values` (Array(String)) — Значения настроек, которые перечислены в столбце `Settings.Names`.
@@ -477,6 +473,72 @@ ClickHouse создаёт таблицу только в том случае, к
 
 Можно указать произвольный ключ партиционирования для таблицы `system.query_log` в конфигурации [query_log](server_settings/settings.md#server_settings-query-log) (параметр `partition_by`).
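Для иллюстрации — примерный запрос к `system.query_log` (набросок; предполагается, что логирование запросов уже включено через `log_queries=1`, а буферы сброшены, например, запросом `SYSTEM FLUSH LOGS`): он показывает пять самых долгих успешно завершившихся запросов за сегодня.

```sql
SELECT
    query_start_time,
    query_duration_ms,
    read_rows,
    memory_usage,
    query
FROM system.query_log
WHERE type = 'QueryFinish' AND event_date = today()
ORDER BY query_duration_ms DESC
LIMIT 5
```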
+## system.query_thread_log {#system_tables-query-thread-log}
+
+Содержит информацию о каждом потоке выполняемых запросов.
+
+ClickHouse создаёт таблицу только в том случае, когда установлен конфигурационный параметр сервера [query_thread_log](server_settings/settings.md#server_settings-query-thread-log). Параметр задаёт правила ведения лога, такие как интервал логирования или имя таблицы, в которую будут логгироваться запросы.
+
+Чтобы включить логирование, задайте значение параметра [log_query_threads](settings/settings.md#settings-log-query-threads) равным 1. Подробности смотрите в разделе [Настройки](settings/settings.md).
+
+Столбцы:
+
+- `event_date` (Date) — дата завершения выполнения запроса потоком.
+- `event_time` (DateTime) — дата и время завершения выполнения запроса потоком.
+- `query_start_time` (DateTime) — время начала обработки запроса.
+- `query_duration_ms` (UInt64) — длительность обработки запроса в миллисекундах.
+- `read_rows` (UInt64) — количество прочитанных строк.
+- `read_bytes` (UInt64) — количество прочитанных байтов.
+- `written_rows` (UInt64) — количество записанных строк для запросов `INSERT`. Для других запросов, значение столбца 0.
+- `written_bytes` (UInt64) — объем записанных данных в байтах для запросов `INSERT`. Для других запросов, значение столбца 0.
+- `memory_usage` (Int64) — разница между выделенной и освобождённой памятью в контексте потока.
+- `peak_memory_usage` (Int64) — максимальная разница между выделенной и освобождённой памятью в контексте потока.
+- `thread_name` (String) — Имя потока.
+- `thread_number` (UInt32) — Внутренний ID потока.
+- `os_thread_id` (Int32) — tid (ID потока операционной системы).
+- `master_thread_number` (UInt32) — Внутренний ID главного потока.
+- `master_os_thread_id` (Int32) — tid (ID потока операционной системы) главного потока.
+- `query` (String) — текст запроса.
+- `is_initial_query` (UInt8) — вид запроса. Возможные значения:
+    - 1 — запрос был инициирован клиентом.
+    - 0 — запрос был инициирован другим запросом при распределенном запросе.
+- `user` (String) — пользователь, запустивший текущий запрос.
+- `query_id` (String) — ID запроса.
+- `address` (IPv6) — IP адрес, с которого пришел запрос.
+- `port` (UInt16) — порт, с которого пришел запрос.
+- `initial_user` (String) — пользователь, запустивший первоначальный запрос (для распределенных запросов).
+- `initial_query_id` (String) — ID родительского запроса.
+- `initial_address` (IPv6) — IP адрес, с которого пришел родительский запрос.
+- `initial_port` (UInt16) — порт, с которого пришел родительский запрос.
+- `interface` (UInt8) — интерфейс, с которого ушёл запрос. Возможные значения:
+    - 1 — TCP.
+    - 2 — HTTP.
+- `os_user` (String) — имя пользователя в OS, который запустил [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` (String) — hostname клиентской машины, с которой присоединился [clickhouse-client](../interfaces/cli.md) или другой TCP клиент.
+- `client_name` (String) — [clickhouse-client](../interfaces/cli.md) или другой TCP клиент.
+- `client_revision` (UInt32) — ревизия [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
+- `client_version_major` (UInt32) — старшая версия [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
+- `client_version_minor` (UInt32) — младшая версия [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
+- `client_version_patch` (UInt32) — патч [clickhouse-client](../interfaces/cli.md) или другого TCP клиента.
+- `http_method` (UInt8) — HTTP метод, инициировавший запрос. Возможные значения:
+    - 0 — запрос запущен с интерфейса TCP.
+    - 1 — `GET`.
+    - 2 — `POST`.
+- `http_user_agent` (String) — HTTP заголовок `UserAgent`.
+- `quota_key` (String) — "ключ квоты" из настроек [квот](quotas.md) (см. `keyed`).
+- `revision` (UInt32) — ревизия ClickHouse.
+- `ProfileEvents.Names` (Array(String)) — Счетчики для изменения различных метрик для данного потока. Описание метрик можно получить из таблицы [system.events](#system_tables-events).
+- `ProfileEvents.Values` (Array(UInt64)) — метрики для данного потока, перечисленные в столбце `ProfileEvents.Names`.
+
+По умолчанию, строки добавляются в таблицу логирования с интервалом в 7,5 секунд. Можно задать интервал в конфигурационном параметре сервера [query_thread_log](server_settings/settings.md#server_settings-query-thread-log) (смотрите параметр `flush_interval_milliseconds`). Чтобы принудительно записать логи из буфера памяти в таблицу, используйте запрос `SYSTEM FLUSH LOGS`.
+
+Если таблицу удалить вручную, она пересоздастся автоматически "на лету". При этом все логи на момент удаления таблицы будут удалены.
+
+!!! note "Примечание"
+    Срок хранения логов не ограничен. Логи не удаляются из таблицы автоматически. Вам необходимо самостоятельно организовать удаление устаревших логов.
+
+Можно указать произвольный ключ партиционирования для таблицы `system.query_thread_log` в конфигурации [query_thread_log](server_settings/settings.md#server_settings-query-thread-log) (параметр `partition_by`).
+
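Примерная последовательность действий для проверки (набросок; предполагается, что параметр `query_thread_log` уже задан в конфигурации сервера):

```sql
-- включаем логирование потоков для текущей сессии
SET log_query_threads = 1;

-- выполняем произвольный запрос, который хотим исследовать
SELECT sum(number) FROM numbers(1000000);

-- принудительно сбрасываем буферы логов в системные таблицы
SYSTEM FLUSH LOGS;

-- смотрим, какие потоки участвовали в выполнении последних запросов
SELECT thread_number, memory_usage, query
FROM system.query_thread_log
ORDER BY event_time DESC
LIMIT 10;
```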
 ## system.replicas {#system_tables-replicas}
 
 Содержит информацию и статус для реплицируемых таблиц, расположенных на локальном сервере.
diff --git a/docs/ru/query_language/create.md b/docs/ru/query_language/create.md
index 8a67d89106e..5379deb73e2 100644
--- a/docs/ru/query_language/create.md
+++ b/docs/ru/query_language/create.md
@@ -194,16 +194,17 @@ ENGINE = MergeTree()
 
 ClickHouse поддерживает временные таблицы со следующими характеристиками:
 
-- временные таблицы исчезают после завершения сессии; в том числе, при обрыве соединения;
+- Временные таблицы исчезают после завершения сессии, в том числе при обрыве соединения.
 - Временная таблица использует только модуль памяти.
-- Невозможно указать базу данных для временной таблицы. Временные таблицы создается вне баз данных.
-- если временная таблица имеет то же имя, что и некоторая другая, то, при упоминании в запросе без указания БД, будет использована временная таблица;
-- при распределённой обработке запроса, используемые в запросе временные таблицы, передаются на удалённые серверы.
+- Невозможно указать базу данных для временной таблицы. Она создается вне баз данных.
+- Невозможно создать временную таблицу распределённым DDL запросом на всех серверах кластера (с опцией `ON CLUSTER`): такая таблица существует только в рамках текущей сессии.
+- Если временная таблица имеет то же имя, что и некоторая другая, то при упоминании в запросе без указания БД будет использована временная таблица.
+- При распределённой обработке запроса используемые в запросе временные таблицы передаются на удалённые серверы.
 
 Чтобы создать временную таблицу, используйте следующий синтаксис (пример использования приведён ниже, после блока с синтаксисом):
 
 ```sql
-CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name [ON CLUSTER cluster]
+CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name
 (
     name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
     name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
@@ -213,6 +214,8 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name [ON CLUSTER cluster]
 
 В большинстве случаев, временные таблицы создаются не вручную, а при использовании внешних данных для запроса, или при распределённом `(GLOBAL) IN`. Подробнее см. соответствующие разделы.
 
+Вместо временных можно использовать обычные таблицы с [ENGINE = Memory](../operations/table_engines/memory.md).
+
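Небольшой набросок работы с временной таблицей в рамках одной сессии `clickhouse-client` (имя `visits_tmp` условное):

```sql
-- временная таблица живёт только в текущей сессии; ENGINE указывать не нужно
CREATE TEMPORARY TABLE visits_tmp
(
    id UInt64,
    url String
);

INSERT INTO visits_tmp VALUES (1, 'https://example.com');

-- после завершения сессии таблица исчезнет вместе с данными
SELECT count() FROM visits_tmp;
```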
 ## Распределенные DDL запросы (секция ON CLUSTER)
 
 Запросы `CREATE`, `DROP`, `ALTER`, `RENAME` поддерживают возможность распределенного выполнения на кластере.
diff --git a/docs/ru/query_language/functions/array_functions.md b/docs/ru/query_language/functions/array_functions.md
index 377750c99c1..f12395ca64d 100644
--- a/docs/ru/query_language/functions/array_functions.md
+++ b/docs/ru/query_language/functions/array_functions.md
@@ -814,4 +814,40 @@ SELECT arrayReverse([1, 2, 3])
 
 Синоним для ["arrayReverse"](#array_functions-arrayreverse)
 
+## arrayCompact {#arraycompact}
+
+Удаляет идущие подряд дубликаты из массива. Порядок результирующих значений определяется порядком в исходном массиве.
+
+**Синтаксис**
+
+```sql
+arrayCompact(arr)
+```
+
+**Параметры**
+
+`arr` — [Массив](../../data_types/array.md) для обхода.
+
+**Возвращаемое значение**
+
+Массив без идущих подряд дубликатов.
+
+Тип: `Array`.
+
+**Пример**
+
+Запрос:
+
+```sql
+SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])
+```
+
+Ответ:
+
+```text
+┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐
+│ [1,nan,nan,2,3]                            │
+└────────────────────────────────────────────┘
+```
+
+Обратите внимание: значения `nan` не равны друг другу, поэтому в примере они не "схлопываются".
+
 [Оригинальная статья](https://clickhouse.yandex/docs/ru/query_language/functions/array_functions/)
diff --git a/docs/ru/query_language/functions/hash_functions.md b/docs/ru/query_language/functions/hash_functions.md
index f7d2237a071..47384e78565 100644
--- a/docs/ru/query_language/functions/hash_functions.md
+++ b/docs/ru/query_language/functions/hash_functions.md
@@ -207,6 +207,44 @@ SELECT javaHash('Hello, world!');
 └───────────────────────────┘
 ```
 
+## javaHashUTF16LE {#javahashutf16le}
+
+Вычисляет [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) от строки, при допущении, что строка представлена в кодировке `UTF-16LE`.
+
+**Синтаксис**
+
+```sql
+javaHashUTF16LE(stringUtf16le)
+```
+
+**Параметры**
+
+- `stringUtf16le` — строка в `UTF-16LE`.
+
+**Возвращаемое значение**
+
+Хэш-значение типа `Int32`.
+
+**Пример**
+
+Верный запрос для строки, кодированной в `UTF-16LE`.
+
+Запрос:
+
+```sql
+SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))
+```
+
+Ответ:
+
+```text
+┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐
+│                                                      3556498 │
+└──────────────────────────────────────────────────────────────┘
+```
+
 ## hiveHash {#hash_functions-hivehash}
 
 Вычисляет `HiveHash` от строки.
diff --git a/docs/ru/query_language/functions/json_functions.md b/docs/ru/query_language/functions/json_functions.md
index 49f575f4b78..9269493473b 100644
--- a/docs/ru/query_language/functions/json_functions.md
+++ b/docs/ru/query_language/functions/json_functions.md
@@ -199,9 +199,9 @@ SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8')
 
 ## JSONExtractRaw(json[, indices_or_keys]...)
 
-Возвращает часть JSON.
+Возвращает часть JSON в виде строки, содержащей неразобранную подстроку.
 
-Если значение не существует или имеет неверный тип, то возвращается пустая строка.
+Если значение не существует, то возвращается пустая строка.
 
 Пример:
 
@@ -209,4 +209,16 @@ SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8')
 SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]'
 ```
 
+## JSONExtractArrayRaw(json[, indices_or_keys]...)
+
+Возвращает массив из элементов JSON массива, каждый из которых представлен в виде строки с неразобранными подстроками из JSON.
+
+Если значение не существует или не является массивом, то возвращается пустой массив.
+
+Пример:
+
+```sql
+SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']
+```
+
 [Оригинальная статья](https://clickhouse.yandex/docs/ru/query_language/functions/json_functions/)
diff --git a/docs/ru/query_language/functions/other_functions.md b/docs/ru/query_language/functions/other_functions.md
index a7e6051e541..da47839d3ca 100644
--- a/docs/ru/query_language/functions/other_functions.md
+++ b/docs/ru/query_language/functions/other_functions.md
@@ -4,6 +4,40 @@
 
 Возвращает строку - имя хоста, на котором эта функция была выполнена. При распределённой обработке запроса, это будет имя хоста удалённого сервера, если функция выполняется на удалённом сервере.
 
+## FQDN {#fqdn}
+
+Возвращает полное имя домена.
+
+**Синтаксис**
+
+```sql
+fqdn();
+```
+
+Эта функция регистронезависимая.
+
+**Возвращаемое значение**
+
+- Полное имя домена.
+
+Тип: `String`.
+
+**Пример**
+
+Запрос:
+
+```sql
+SELECT FQDN();
+```
+
+Ответ:
+
+```text
+┌─FQDN()──────────────────────────┐
+│ clickhouse.ru-central1.internal │
+└─────────────────────────────────┘
+```
+
 ## basename
 
 Извлекает конечную часть строки после последнего слэша или бэкслэша. Функция часто используется для извлечения имени файла из пути.
diff --git a/docs/ru/query_language/functions/string_functions.md b/docs/ru/query_language/functions/string_functions.md
index 5e5a270f51b..2169cb794e0 100644
--- a/docs/ru/query_language/functions/string_functions.md
+++ b/docs/ru/query_language/functions/string_functions.md
@@ -189,6 +189,120 @@ SELECT startsWith('Hello, world!', 'He');
 └───────────────────────────────────┘
 ```
 
+## trimLeft {#trimleft}
+
+Удаляет все последовательные вхождения обычных пробелов (символ ASCII с кодом 32) с левого конца строки. Не удаляет другие виды пробелов (табуляция, пробел без разрыва и т. д.).
+
+**Синтаксис**
+
+```sql
+trimLeft(string)
+```
+
+Алиас: `ltrim`.
+
+**Параметры**
+
+- `string` — строка для обрезки. [String](../../data_types/string.md).
+
+**Возвращаемое значение**
+
+Исходную строку без общих пробельных символов слева.
+
+Тип: `String`.
+
+**Пример**
+
+Запрос:
+
+```sql
+SELECT trimLeft('     Hello, world!     ')
+```
+
+Ответ:
+
+```text
+┌─trimLeft('     Hello, world!     ')─┐
+│ Hello, world!                       │
+└─────────────────────────────────────┘
+```
+
+## trimRight {#trimright}
+
+Удаляет все последовательные вхождения обычных пробелов (символ ASCII с кодом 32) с правого конца строки. Не удаляет другие виды пробелов (табуляция, пробел без разрыва и т. д.).
+
+**Синтаксис**
+
+```sql
+trimRight(string)
+```
+
+Алиас: `rtrim`.
+
+**Параметры**
+
+- `string` — строка для обрезки. [String](../../data_types/string.md).
+
+**Возвращаемое значение**
+
+Исходную строку без общих пробельных символов справа.
+
+Тип: `String`.
+
+**Пример**
+
+Запрос:
+
+```sql
+SELECT trimRight('     Hello, world!     ')
+```
+
+Ответ:
+
+```text
+┌─trimRight('     Hello, world!     ')─┐
+│      Hello, world!                   │
+└──────────────────────────────────────┘
+```
+
+## trimBoth {#trimboth}
+
+Удаляет все последовательные вхождения обычных пробелов (символ ASCII с кодом 32) с обоих концов строки. Не удаляет другие виды пробелов (табуляция, пробел без разрыва и т. д.).
+
+**Синтаксис**
+
+```sql
+trimBoth(string)
+```
+
+Алиас: `trim`.
+
+**Параметры**
+
+- `string` — строка для обрезки. [String](../../data_types/string.md).
+
+**Возвращаемое значение**
+
+Исходную строку без общих пробельных символов с обоих концов строки.
+
+Тип: `String`.
+
+**Пример**
+
+Запрос:
+
+```sql
+SELECT trimBoth('     Hello, world!     ')
+```
+
+Ответ:
+
+```text
+┌─trimBoth('     Hello, world!     ')─┐
+│ Hello, world!                       │
+└─────────────────────────────────────┘
+```
+
 ## CRC32(s)
 
 Возвращает чексумму CRC32 данной строки; используется многочлен CRC-32-IEEE 802.3 с начальным значением `0xffffffff` (т.к. используется реализация из zlib).
diff --git a/docs/ru/query_language/functions/type_conversion_functions.md b/docs/ru/query_language/functions/type_conversion_functions.md
index a94d96e7022..3fb431fa3b2 100644
--- a/docs/ru/query_language/functions/type_conversion_functions.md
+++ b/docs/ru/query_language/functions/type_conversion_functions.md
@@ -349,4 +349,48 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null
 └─────────────────────────────────────────┘
 ```
 
+## toInterval(Year|Quarter|Month|Week|Day|Hour|Minute|Second) {#function-tointerval}
+
+Приводит аргумент из числового типа данных к типу данных [IntervalType](../../data_types/special_data_types/interval.md).
+
+**Синтаксис**
+
+```sql
+toIntervalSecond(number)
+toIntervalMinute(number)
+toIntervalHour(number)
+toIntervalDay(number)
+toIntervalWeek(number)
+toIntervalMonth(number)
+toIntervalQuarter(number)
+toIntervalYear(number)
+```
+
+**Параметры**
+
+- `number` — длительность интервала. Положительное целое число.
+
+**Возвращаемые значения**
+
+- Значение с типом данных `Interval`.
+
+**Пример**
+
+```sql
+WITH
+    toDate('2019-01-01') AS date,
+    INTERVAL 1 WEEK AS interval_week,
+    toIntervalWeek(1) AS interval_to_week
+SELECT
+    date + interval_week,
+    date + interval_to_week
+```
+
+```text
+┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐
+│                2019-01-08 │                   2019-01-08 │
+└───────────────────────────┴──────────────────────────────┘
+```
+
+
 [Оригинальная статья](https://clickhouse.yandex/docs/ru/query_language/functions/type_conversion_functions/)
diff --git a/docs/ru/query_language/operators.md b/docs/ru/query_language/operators.md
index 74aa2270e90..31378760fcb 100644
--- a/docs/ru/query_language/operators.md
+++ b/docs/ru/query_language/operators.md
@@ -67,6 +67,8 @@
 
 ## Оператор для работы с датами и временем {#operators-datetime}
 
+### EXTRACT
+
 ```sql
 EXTRACT(part FROM date);
 ```
@@ -128,6 +130,39 @@ FROM test.Orders;
 
 Больше примеров приведено в [тестах](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00619_extract.sql).
 
+### INTERVAL {#operator-interval}
+
+Создаёт значение типа [Interval](../data_types/special_data_types/interval.md), которое должно использоваться в арифметических операциях со значениями типов [Date](../data_types/date.md) и [DateTime](../data_types/datetime.md).
+
+Типы интервалов:
+- `SECOND`
+- `MINUTE`
+- `HOUR`
+- `DAY`
+- `WEEK`
+- `MONTH`
+- `QUARTER`
+- `YEAR`
+
+!!! warning "Внимание"
+    Интервалы различных типов нельзя объединять. Нельзя использовать выражения вида `INTERVAL 4 DAY 1 HOUR`.
Вместо этого интервалы можно выразить в единицах, меньших или равных наименьшей единице интервала, например `INTERVAL 25 HOUR`. Также можно выполнять последовательные операции, как показано в примере ниже.
+
+Пример:
+
+```sql
+SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR
+```
+```text
+┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐
+│ 2019-10-23 11:16:28 │                                     2019-10-27 14:16:28 │
+└─────────────────────┴─────────────────────────────────────────────────────────┘
+```
+
+**Смотрите также**
+
+- Тип данных [Interval](../data_types/special_data_types/interval.md)
+- Функции преобразования типов [toInterval](functions/type_conversion_functions.md#function-tointerval)
+
 ## Оператор логического отрицания
 
 `NOT a` - функция `not(a)`
diff --git a/docs/ru/query_language/system.md b/docs/ru/query_language/system.md
index 3457a87e188..31e0c3cf90b 100644
--- a/docs/ru/query_language/system.md
+++ b/docs/ru/query_language/system.md
@@ -3,7 +3,7 @@
 - [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
 - [RELOAD DICTIONARY](#query_language-system-reload-dictionary)
 - [DROP DNS CACHE](#query_language-system-drop-dns-cache)
-- [DROP MARKS CACHE](#query_language-system-drop-marks-cache)
+- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
 - [FLUSH LOGS](#query_language-system-flush_logs)
 - [RELOAD CONFIG](#query_language-system-reload-config)
 - [SHUTDOWN](#query_language-system-shutdown)
@@ -36,7 +36,7 @@ SELECT name, status FROM system.dictionaries;
 
 Для более удобного (автоматического) управления кешем см. параметры disable_internal_dns_cache, dns_cache_update_period.
 
-## DROP MARKS CACHE {#query_language-system-drop-marks-cache}
+## DROP MARK CACHE {#query_language-system-drop-mark-cache}
 
 Сбрасывает кеш "засечек" (`mark cache`). Используется при разработке ClickHouse и тестах производительности.
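Для иллюстрации — несколько запросов SYSTEM из этого раздела (набросок; сброс кешей влияет на производительность, поэтому выполнять такие запросы имеет смысл на тестовом сервере):

```sql
-- перезагрузить все внешние словари
SYSTEM RELOAD DICTIONARIES;

-- сбросить кеш "засечек" (используется при тестах производительности)
SYSTEM DROP MARK CACHE;

-- принудительно записать буферизованные логи в системные таблицы
SYSTEM FLUSH LOGS;
```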
diff --git a/docs/toc_en.yml b/docs/toc_en.yml index 3c4a5506a06..8a2b32b240a 100644 --- a/docs/toc_en.yml +++ b/docs/toc_en.yml @@ -5,10 +5,12 @@ nav: - 'Distinctive Features of ClickHouse': 'introduction/distinctive_features.md' - 'ClickHouse Features that Can Be Considered Disadvantages': 'introduction/features_considered_disadvantages.md' - 'Performance': 'introduction/performance.md' - - 'The Yandex.Metrica Task': 'introduction/ya_metrika_task.md' + - 'History': 'introduction/history.md' - 'Getting Started': - - 'Deploying and Running': 'getting_started/index.md' + - 'hidden': 'getting_started/index.md' + - 'Installation': 'getting_started/install.md' + - 'Tutorial': 'getting_started/tutorial.md' - 'Example Datasets': - 'OnTime': 'getting_started/example_datasets/ontime.md' - 'New York Taxi Data': 'getting_started/example_datasets/nyc_taxi.md' @@ -119,6 +121,7 @@ nav: - 'Working with geographical coordinates': 'query_language/functions/geo.md' - 'Working with Nullable arguments': 'query_language/functions/functions_for_nulls.md' - 'Machine Learning Functions': 'query_language/functions/machine_learning_functions.md' + - 'Introspection': 'query_language/functions/introspection.md' - 'Other': 'query_language/functions/other_functions.md' - 'Aggregate Functions': - 'Introduction': 'query_language/agg_functions/index.md' @@ -215,8 +218,9 @@ nav: - 'Overview of ClickHouse Architecture': 'development/architecture.md' - 'How to Build ClickHouse on Linux': 'development/build.md' - 'How to Build ClickHouse on Mac OS X': 'development/build_osx.md' - - 'How to Build ClickHouse on Linux for Mac OS X': 'development/build_cross.md' - - 'How to Write C++ code': 'development/style.md' + - 'How to Build ClickHouse on Linux for Mac OS X': 'development/build_cross_osx.md' + - 'How to Build ClickHouse on Linux for AARCH64 (ARM64)': 'development/build_cross_arm.md' + - 'How to Write C++ Code': 'development/style.md' - 'How to Run ClickHouse Tests': 'development/tests.md' - 'The Beginner ClickHouse Developer Instruction': 'development/developer_instruction.md' - 'Third-Party Libraries Used': 'development/contrib.md' diff --git a/docs/toc_fa.yml b/docs/toc_fa.yml index 207034a8718..c5a2a7fd80b 100644 --- a/docs/toc_fa.yml +++ b/docs/toc_fa.yml @@ -1,15 +1,17 @@ nav: -- 'Introduction': +- 'ﯽﻓﺮﻌﻣ': - 'ClickHouse چیست؟': 'index.md' - ' ویژگی های برجسته ClickHouse': 'introduction/distinctive_features.md' - ' ویژگی های از ClickHouse که می تواند معایبی باشد': 'introduction/features_considered_disadvantages.md' - - 'Performance': 'introduction/performance.md' - - 'The Yandex.Metrica task': 'introduction/ya_metrika_task.md' + - 'ﯽﯾﺍﺭﺎﮐ': 'introduction/performance.md' + - 'ﺦﯾﺭﺎﺗ': 'introduction/history.md' - 'Getting started': - - ' شروع به کار': 'getting_started/index.md' - - 'Example datasets': + - 'hidden': 'getting_started/index.md' + - 'ﯼﺯﺍﺪﻧﺍ ﻩﺍﺭ ﻭ ﺐﺼﻧ': 'getting_started/install.md' + - 'ﺵﺯﻮﻣﺁ': 'getting_started/tutorial.md' + - 'ﻪﻧﻮﻤﻧ ﯼﺎﻫ ﻩﺩﺍﺩ ﻪﻋﻮﻤﺠﻣ': - 'OnTime': 'getting_started/example_datasets/ontime.md' - ' داده های تاکسی New York': 'getting_started/example_datasets/nyc_taxi.md' - ' بنچمارک AMPLab Big Data': 'getting_started/example_datasets/amplab_benchmark.md' @@ -18,7 +20,7 @@ nav: - ' بنچمارک Star Schema': 'getting_started/example_datasets/star_schema.md' - 'Yandex.Metrica Data': 'getting_started/example_datasets/metrica.md' -- 'Interfaces': +- 'ﻂﺑﺍﺭ': - 'Interface ها': 'interfaces/index.md' - ' کلاینت Command-line': 'interfaces/cli.md' - 'Native interface (TCP)': 'interfaces/tcp.md' @@ -33,7 +35,7 
@@ nav: - 'رابط های بصری': 'interfaces/third-party/gui.md' - 'پروکسی': 'interfaces/third-party/proxy.md' -- 'Data types': +- 'ﻩﺩﺍﺩ ﻉﺍﻮﻧﺍ': - 'Introduction': 'data_types/index.md' - 'UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64': 'data_types/int_uint.md' - 'Float32, Float64': 'data_types/float.md' @@ -147,6 +149,7 @@ nav: - 'Working with geographical coordinates': 'query_language/functions/geo.md' - 'Working with Nullable arguments': 'query_language/functions/functions_for_nulls.md' - 'Machine Learning Functions': 'query_language/functions/machine_learning_functions.md' + - 'Introspection': 'query_language/functions/introspection.md' - 'Other': 'query_language/functions/other_functions.md' - 'Aggregate Functions': - 'Introduction': 'query_language/agg_functions/index.md' diff --git a/docs/toc_ja.yml b/docs/toc_ja.yml new file mode 100644 index 00000000000..8a2b32b240a --- /dev/null +++ b/docs/toc_ja.yml @@ -0,0 +1,234 @@ +nav: + +- 'Introduction': + - 'Overview': 'index.md' + - 'Distinctive Features of ClickHouse': 'introduction/distinctive_features.md' + - 'ClickHouse Features that Can Be Considered Disadvantages': 'introduction/features_considered_disadvantages.md' + - 'Performance': 'introduction/performance.md' + - 'History': 'introduction/history.md' + +- 'Getting Started': + - 'hidden': 'getting_started/index.md' + - 'Installation': 'getting_started/install.md' + - 'Tutorial': 'getting_started/tutorial.md' + - 'Example Datasets': + - 'OnTime': 'getting_started/example_datasets/ontime.md' + - 'New York Taxi Data': 'getting_started/example_datasets/nyc_taxi.md' + - 'AMPLab Big Data Benchmark': 'getting_started/example_datasets/amplab_benchmark.md' + - 'WikiStat': 'getting_started/example_datasets/wikistat.md' + - 'Terabyte Click Logs from Criteo': 'getting_started/example_datasets/criteo.md' + - 'Star Schema Benchmark': 'getting_started/example_datasets/star_schema.md' + - 'Yandex.Metrica Data': 'getting_started/example_datasets/metrica.md' + +- 'Interfaces': + - 'Introduction': 'interfaces/index.md' + - 'Command-Line Client': 'interfaces/cli.md' + - 'Native Interface (TCP)': 'interfaces/tcp.md' + - 'HTTP Interface': 'interfaces/http.md' + - 'Input and Output Formats': 'interfaces/formats.md' + - 'JDBC Driver': 'interfaces/jdbc.md' + - 'ODBC Driver': 'interfaces/odbc.md' + - 'C++ Client Library': 'interfaces/cpp.md' + - 'Third-Party': + - 'Client Libraries': 'interfaces/third-party/client_libraries.md' + - 'Integrations': 'interfaces/third-party/integrations.md' + - 'Visual Interfaces': 'interfaces/third-party/gui.md' + - 'Proxies': 'interfaces/third-party/proxy.md' + +- 'Database Engines': + - 'Introduction': 'database_engines/index.md' + - 'MySQL': 'database_engines/mysql.md' + - 'Lazy': 'database_engines/lazy.md' + +- 'Table Engines': + - 'Introduction': 'operations/table_engines/index.md' + - 'MergeTree Family': + - 'MergeTree': 'operations/table_engines/mergetree.md' + - 'Data Replication': 'operations/table_engines/replication.md' + - 'Custom Partitioning Key': 'operations/table_engines/custom_partitioning_key.md' + - 'ReplacingMergeTree': 'operations/table_engines/replacingmergetree.md' + - 'SummingMergeTree': 'operations/table_engines/summingmergetree.md' + - 'AggregatingMergeTree': 'operations/table_engines/aggregatingmergetree.md' + - 'CollapsingMergeTree': 'operations/table_engines/collapsingmergetree.md' + - 'VersionedCollapsingMergeTree': 'operations/table_engines/versionedcollapsingmergetree.md' + - 'GraphiteMergeTree': 
'operations/table_engines/graphitemergetree.md' + - 'Log Family': + - 'Introduction': 'operations/table_engines/log_family.md' + - 'StripeLog': 'operations/table_engines/stripelog.md' + - 'Log': 'operations/table_engines/log.md' + - 'TinyLog': 'operations/table_engines/tinylog.md' + - 'Integrations': + - 'Kafka': 'operations/table_engines/kafka.md' + - 'MySQL': 'operations/table_engines/mysql.md' + - 'JDBC': 'operations/table_engines/jdbc.md' + - 'ODBC': 'operations/table_engines/odbc.md' + - 'HDFS': 'operations/table_engines/hdfs.md' + - 'Special': + - 'Distributed': 'operations/table_engines/distributed.md' + - 'External data': 'operations/table_engines/external_data.md' + - 'Dictionary': 'operations/table_engines/dictionary.md' + - 'Merge': 'operations/table_engines/merge.md' + - 'File': 'operations/table_engines/file.md' + - 'Null': 'operations/table_engines/null.md' + - 'Set': 'operations/table_engines/set.md' + - 'Join': 'operations/table_engines/join.md' + - 'URL': 'operations/table_engines/url.md' + - 'View': 'operations/table_engines/view.md' + - 'MaterializedView': 'operations/table_engines/materializedview.md' + - 'Memory': 'operations/table_engines/memory.md' + - 'Buffer': 'operations/table_engines/buffer.md' + +- 'SQL Reference': + - 'hidden': 'query_language/index.md' + - 'Syntax': 'query_language/syntax.md' + - 'Statements': + - 'SELECT': 'query_language/select.md' + - 'INSERT INTO': 'query_language/insert_into.md' + - 'CREATE': 'query_language/create.md' + - 'ALTER': 'query_language/alter.md' + - 'SYSTEM': 'query_language/system.md' + - 'SHOW': 'query_language/show.md' + - 'Other': 'query_language/misc.md' + - 'Functions': + - 'Introduction': 'query_language/functions/index.md' + - 'Arithmetic': 'query_language/functions/arithmetic_functions.md' + - 'Comparison': 'query_language/functions/comparison_functions.md' + - 'Logical': 'query_language/functions/logical_functions.md' + - 'Type Conversion': 'query_language/functions/type_conversion_functions.md' + - 'Working with Dates and Times': 'query_language/functions/date_time_functions.md' + - 'Working with strings': 'query_language/functions/string_functions.md' + - 'For Searching Strings': 'query_language/functions/string_search_functions.md' + - 'For Replacing in Strings': 'query_language/functions/string_replace_functions.md' + - 'Conditional ': 'query_language/functions/conditional_functions.md' + - 'Mathematical': 'query_language/functions/math_functions.md' + - 'Rounding': 'query_language/functions/rounding_functions.md' + - 'Working with Arrays': 'query_language/functions/array_functions.md' + - 'Splitting and Merging Strings and Arrays': 'query_language/functions/splitting_merging_functions.md' + - 'Bit': 'query_language/functions/bit_functions.md' + - 'Bitmap': 'query_language/functions/bitmap_functions.md' + - 'Hash': 'query_language/functions/hash_functions.md' + - 'Generating Pseudo-Random Numbers': 'query_language/functions/random_functions.md' + - 'Encoding': 'query_language/functions/encoding_functions.md' + - 'Working with UUID': 'query_language/functions/uuid_functions.md' + - 'Working with URLs': 'query_language/functions/url_functions.md' + - 'Working with IP Addresses': 'query_language/functions/ip_address_functions.md' + - 'Working with JSON.': 'query_language/functions/json_functions.md' + - 'Higher-Order': 'query_language/functions/higher_order_functions.md' + - 'Working with External Dictionaries': 'query_language/functions/ext_dict_functions.md' + - 'Working with Yandex.Metrica Dictionaries': 
'query_language/functions/ym_dict_functions.md' + - 'Implementing the IN Operator': 'query_language/functions/in_functions.md' + - 'arrayJoin': 'query_language/functions/array_join.md' + - 'Working with geographical coordinates': 'query_language/functions/geo.md' + - 'Working with Nullable arguments': 'query_language/functions/functions_for_nulls.md' + - 'Machine Learning Functions': 'query_language/functions/machine_learning_functions.md' + - 'Introspection': 'query_language/functions/introspection.md' + - 'Other': 'query_language/functions/other_functions.md' + - 'Aggregate Functions': + - 'Introduction': 'query_language/agg_functions/index.md' + - 'Reference': 'query_language/agg_functions/reference.md' + - 'Aggregate function combinators': 'query_language/agg_functions/combinators.md' + - 'Parametric aggregate functions': 'query_language/agg_functions/parametric_functions.md' + - 'Table Functions': + - 'Introduction': 'query_language/table_functions/index.md' + - 'file': 'query_language/table_functions/file.md' + - 'merge': 'query_language/table_functions/merge.md' + - 'numbers': 'query_language/table_functions/numbers.md' + - 'remote': 'query_language/table_functions/remote.md' + - 'url': 'query_language/table_functions/url.md' + - 'mysql': 'query_language/table_functions/mysql.md' + - 'jdbc': 'query_language/table_functions/jdbc.md' + - 'odbc': 'query_language/table_functions/odbc.md' + - 'hdfs': 'query_language/table_functions/hdfs.md' + - 'input': 'query_language/table_functions/input.md' + - 'Dictionaries': + - 'Introduction': 'query_language/dicts/index.md' + - 'External Dictionaries': + - 'General Description': 'query_language/dicts/external_dicts.md' + - 'Configuring an External Dictionary': 'query_language/dicts/external_dicts_dict.md' + - 'Storing Dictionaries in Memory': 'query_language/dicts/external_dicts_dict_layout.md' + - 'Dictionary Updates': 'query_language/dicts/external_dicts_dict_lifetime.md' + - 'Sources of External Dictionaries': 'query_language/dicts/external_dicts_dict_sources.md' + - 'Dictionary Key and Fields': 'query_language/dicts/external_dicts_dict_structure.md' + - 'Internal Dictionaries': 'query_language/dicts/internal_dicts.md' + - 'Operators': 'query_language/operators.md' + - 'Data Types': + - 'Introduction': 'data_types/index.md' + - 'UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64': 'data_types/int_uint.md' + - 'Float32, Float64': 'data_types/float.md' + - 'Decimal': 'data_types/decimal.md' + - 'Boolean': 'data_types/boolean.md' + - 'String': 'data_types/string.md' + - 'FixedString(N)': 'data_types/fixedstring.md' + - 'UUID': 'data_types/uuid.md' + - 'Date': 'data_types/date.md' + - 'DateTime': 'data_types/datetime.md' + - 'Enum': 'data_types/enum.md' + - 'Array(T)': 'data_types/array.md' + - 'AggregateFunction(name, types_of_arguments...)': 'data_types/nested_data_structures/aggregatefunction.md' + - 'Tuple(T1, T2, ...)': 'data_types/tuple.md' + - 'Nullable': 'data_types/nullable.md' + - 'Nested Data Structures': + - 'hidden': 'data_types/nested_data_structures/index.md' + - 'Nested(Name1 Type1, Name2 Type2, ...)': 'data_types/nested_data_structures/nested.md' + - 'Special Data Types': + - 'hidden': 'data_types/special_data_types/index.md' + - 'Expression': 'data_types/special_data_types/expression.md' + - 'Set': 'data_types/special_data_types/set.md' + - 'Nothing': 'data_types/special_data_types/nothing.md' + - 'Interval': 'data_types/special_data_types/interval.md' + - 'Domains': + - 'Overview': 'data_types/domains/overview.md' + - 
'IPv4': 'data_types/domains/ipv4.md' + - 'IPv6': 'data_types/domains/ipv6.md' + +- 'Guides': + - 'Overview': 'guides/index.md' + - 'Applying CatBoost Models': 'guides/apply_catboost_model.md' + +- 'Operations': + - 'Introduction': 'operations/index.md' + - 'Requirements': 'operations/requirements.md' + - 'Monitoring': 'operations/monitoring.md' + - 'Troubleshooting': 'operations/troubleshooting.md' + - 'Usage Recommendations': 'operations/tips.md' + - 'ClickHouse Update': 'operations/update.md' + - 'Access Rights': 'operations/access_rights.md' + - 'Data Backup': 'operations/backup.md' + - 'Configuration Files': 'operations/configuration_files.md' + - 'Quotas': 'operations/quotas.md' + - 'System Tables': 'operations/system_tables.md' + - 'Server Configuration Parameters': + - 'Introduction': 'operations/server_settings/index.md' + - 'Server Settings': 'operations/server_settings/settings.md' + - 'Settings': + - 'Introduction': 'operations/settings/index.md' + - 'Permissions for Queries': 'operations/settings/permissions_for_queries.md' + - 'Restrictions on Query Complexity': 'operations/settings/query_complexity.md' + - 'Settings': 'operations/settings/settings.md' + - 'Settings Profiles': 'operations/settings/settings_profiles.md' + - 'Constraints on Settings': 'operations/settings/constraints_on_settings.md' + - 'User Settings': 'operations/settings/settings_users.md' + - 'Utilities': + - 'Overview': 'operations/utils/index.md' + - 'clickhouse-copier': 'operations/utils/clickhouse-copier.md' + - 'clickhouse-local': 'operations/utils/clickhouse-local.md' + +- 'Development': + - 'hidden': 'development/index.md' + - 'Overview of ClickHouse Architecture': 'development/architecture.md' + - 'How to Build ClickHouse on Linux': 'development/build.md' + - 'How to Build ClickHouse on Mac OS X': 'development/build_osx.md' + - 'How to Build ClickHouse on Linux for Mac OS X': 'development/build_cross_osx.md' + - 'How to Build ClickHouse on Linux for AARCH64 (ARM64)': 'development/build_cross_arm.md' + - 'How to Write C++ Code': 'development/style.md' + - 'How to Run ClickHouse Tests': 'development/tests.md' + - 'The Beginner ClickHouse Developer Instruction': 'development/developer_instruction.md' + - 'Third-Party Libraries Used': 'development/contrib.md' + +- 'What''s New': + - 'Roadmap': 'roadmap.md' + - 'Changelog': 'changelog.md' + - 'Security Changelog': 'security_changelog.md' + +- 'F.A.Q.': + - 'General Questions': 'faq/general.md' diff --git a/docs/toc_ru.yml b/docs/toc_ru.yml index 5c71894c8f6..469590b6bc8 100644 --- a/docs/toc_ru.yml +++ b/docs/toc_ru.yml @@ -5,11 +5,13 @@ nav: - 'Отличительные возможности ClickHouse': 'introduction/distinctive_features.md' - 'Особенности ClickHouse, которые могут считаться недостатками': 'introduction/features_considered_disadvantages.md' - 'Производительность': 'introduction/performance.md' - - 'Постановка задачи в Яндекс.Метрике': 'introduction/ya_metrika_task.md' + - 'История': 'introduction/history.md' - 'Информационная поддержка': 'introduction/info.md' - 'Начало работы': - - 'Установка и запуск': 'getting_started/index.md' + - 'hidden': 'getting_started/index.md' + - 'Установка': 'getting_started/install.md' + - 'Руководство для начинающих': 'getting_started/tutorial.md' - 'Тестовые наборы данных': - 'OnTime': 'getting_started/example_datasets/ontime.md' - 'Данные о такси в Нью-Йорке': 'getting_started/example_datasets/nyc_taxi.md' @@ -172,6 +174,7 @@ nav: - 'Expression': 'data_types/special_data_types/expression.md' - 'Set': 
'data_types/special_data_types/set.md' - 'Nothing': 'data_types/special_data_types/nothing.md' + - 'Interval': 'data_types/special_data_types/interval.md' - 'Domains': - 'Overview': 'data_types/domains/overview.md' - 'IPv4': 'data_types/domains/ipv4.md' @@ -214,7 +217,7 @@ nav: - 'Обзор архитектуры ClickHouse': 'development/architecture.md' - 'Как собрать ClickHouse на Linux': 'development/build.md' - 'Как собрать ClickHouse на Mac OS X': 'development/build_osx.md' - - 'Как собрать ClickHouse на Linux для Mac OS X': 'development/build_cross.md' + - 'Как собрать ClickHouse на Linux для Mac OS X': 'development/build_cross_osx.md' - 'Как писать код на C++': 'development/style.md' - 'Как запустить тесты': 'development/tests.md' - 'Инструкция для начинающего разработчика ClickHouse': 'development/developer_instruction.md' diff --git a/docs/toc_zh.yml b/docs/toc_zh.yml index c8be2ad7606..7395dcfe145 100644 --- a/docs/toc_zh.yml +++ b/docs/toc_zh.yml @@ -5,10 +5,12 @@ nav: - 'ClickHouse的独特功能': 'introduction/distinctive_features.md' - 'ClickHouse功能可被视为缺点': 'introduction/features_considered_disadvantages.md' - '性能': 'introduction/performance.md' - - 'Yandex.Metrica使用案例': 'introduction/ya_metrika_task.md' + - '历史': 'introduction/history.md' - '入门指南': - - '部署运行': 'getting_started/index.md' + - 'hidden': 'getting_started/index.md' + - '安装': 'getting_started/install.md' + - '教程': 'getting_started/tutorial.md' - '示例数据集': - '航班飞行数据': 'getting_started/example_datasets/ontime.md' - '纽约市出租车数据': 'getting_started/example_datasets/nyc_taxi.md' @@ -16,6 +18,7 @@ nav: - '维基访问数据': 'getting_started/example_datasets/wikistat.md' - 'Criteo TB级别点击日志': 'getting_started/example_datasets/criteo.md' - 'Star Schema基准测试': 'getting_started/example_datasets/star_schema.md' + - 'Yandex.Metrica': 'getting_started/example_datasets/metrica.md' - '客户端': - '介绍': 'interfaces/index.md' @@ -145,6 +148,7 @@ nav: - 'GEO函数': 'query_language/functions/geo.md' - 'Nullable处理函数': 'query_language/functions/functions_for_nulls.md' - '机器学习函数': 'query_language/functions/machine_learning_functions.md' + - 'Introspection': 'query_language/functions/introspection.md' - '其他函数': 'query_language/functions/other_functions.md' - '聚合函数': - '介绍': 'query_language/agg_functions/index.md' @@ -212,7 +216,7 @@ nav: - 'ClickHouse架构概述': 'development/architecture.md' - '如何在Linux中编译ClickHouse': 'development/build.md' - '如何在Mac OS X中编译ClickHouse': 'development/build_osx.md' - - '如何在Linux中编译Mac OS X ClickHouse': 'development/build_cross.md' + - '如何在Linux中编译Mac OS X ClickHouse': 'development/build_cross_osx.md' - '如何编写C++代码': 'development/style.md' - '如何运行ClickHouse测试': 'development/tests.md' - '开发者指南': 'development/developer_instruction.md' diff --git a/docs/tools/build.py b/docs/tools/build.py index 729229fdee7..0e855ce9f1e 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -45,6 +45,9 @@ def build_for_lang(lang, args): os.environ['SINGLE_PAGE'] = '0' config_path = os.path.join(args.docs_dir, 'toc_%s.yml' % lang) + if args.is_stable_release and not os.path.exists(config_path): + logging.warn('Skipping %s docs, because %s does not exist' % (lang, config_path)) + return try: theme_cfg = { @@ -74,6 +77,7 @@ def build_for_lang(lang, args): 'en': 'ClickHouse %s Documentation', 'ru': 'Документация ClickHouse %s', 'zh': 'ClickHouse文档 %s', + 'ja': 'ClickHouseドキュメント %s', 'fa': 'مستندات %sClickHouse' } @@ -241,13 +245,14 @@ if __name__ == '__main__': os.chdir(os.path.join(os.path.dirname(__file__), '..')) arg_parser = argparse.ArgumentParser() - 
arg_parser.add_argument('--lang', default='en,ru,zh,fa') + arg_parser.add_argument('--lang', default='en,ru,zh,ja,fa') arg_parser.add_argument('--docs-dir', default='.') arg_parser.add_argument('--theme-dir', default='mkdocs-material-theme') arg_parser.add_argument('--website-dir', default=os.path.join('..', 'website')) arg_parser.add_argument('--output-dir', default='build') arg_parser.add_argument('--enable-stable-releases', action='store_true') arg_parser.add_argument('--version-prefix', type=str, default='') + arg_parser.add_argument('--is-stable-release', action='store_true') arg_parser.add_argument('--skip-single-page', action='store_true') arg_parser.add_argument('--skip-pdf', action='store_true') arg_parser.add_argument('--skip-website', action='store_true') @@ -259,8 +264,6 @@ if __name__ == '__main__': from github import choose_latest_releases args.stable_releases = choose_latest_releases() if args.enable_stable_releases else [] - - logging.basicConfig( level=logging.DEBUG if args.verbose else logging.INFO, diff --git a/docs/tools/easy_edit.sh b/docs/tools/easy_edit.sh index 28c38453d0d..ed8a43fead7 100755 --- a/docs/tools/easy_edit.sh +++ b/docs/tools/easy_edit.sh @@ -14,7 +14,7 @@ popd rm -rf "${EDIT_DIR}" || true -for DOCS_LANG in en ru zh fa +for DOCS_LANG in en ru zh ja fa do for ARTICLE in ${ARTICLES} do diff --git a/docs/tools/github.py b/docs/tools/github.py index e07d8a0683a..d92dfe7435b 100644 --- a/docs/tools/github.py +++ b/docs/tools/github.py @@ -15,7 +15,7 @@ def choose_latest_releases(): candidates = requests.get('https://api.github.com/repos/ClickHouse/ClickHouse/tags?per_page=100').json() for tag in candidates: name = tag.get('name', '') - if 'v18' in name or 'stable' not in name: + if ('v18' in name) or ('stable' not in name) or ('prestable' in name): continue major_version = '.'.join((name.split('.', 2))[:2]) if major_version not in seen: @@ -33,6 +33,7 @@ def process_release(args, callback, release): tar.extractall(base_dir) args = copy.deepcopy(args) args.version_prefix = name + args.is_stable_release = True args.docs_dir = os.path.join(base_dir, os.listdir(base_dir)[0], 'docs') callback(args) diff --git a/docs/tools/make_links.sh b/docs/tools/make_links.sh index cca2f5feb6b..04c51424ec8 100755 --- a/docs/tools/make_links.sh +++ b/docs/tools/make_links.sh @@ -6,12 +6,12 @@ function do_make_links() { - langs=(en ru fa zh) + langs=(en ru zh ja fa) src_file="$1" for lang in "${langs[@]}" do # replacing "/./" with / - dst_file="../${lang}/${src_file}" + dst_file="../${lang}${src_file}" dst_file="${dst_file/\/\.\//\/}" mkdir -p $(dirname "${dst_file}") diff --git a/docs/tools/mkdocs-material-theme/assets/flags/ja.svg b/docs/tools/mkdocs-material-theme/assets/flags/ja.svg new file mode 100644 index 00000000000..a666c272523 --- /dev/null +++ b/docs/tools/mkdocs-material-theme/assets/flags/ja.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/docs/tools/mkdocs-material-theme/assets/javascripts/lunr/lunr.ja.js b/docs/tools/mkdocs-material-theme/assets/javascripts/lunr/lunr.ja.js new file mode 120000 index 00000000000..c20586be8a3 --- /dev/null +++ b/docs/tools/mkdocs-material-theme/assets/javascripts/lunr/lunr.ja.js @@ -0,0 +1 @@ +lunr.jp.js \ No newline at end of file diff --git a/docs/tools/mkdocs-material-theme/partials/flags.html b/docs/tools/mkdocs-material-theme/partials/flags.html index 26d6cdd8f9f..c7b06fbc4d0 100644 --- a/docs/tools/mkdocs-material-theme/partials/flags.html +++ b/docs/tools/mkdocs-material-theme/partials/flags.html @@ -1,4 +1,4 @@ -{% set 
alt_langs = [['en', 'English'], ['ru', 'Russian'], ['zh', 'Chinese'], ['fa', 'Farsi']] %} +{% set alt_langs = [['en', 'English'], ['ru', 'Russian'], ['zh', 'Chinese'], ['ja', 'Japanese'], ['fa', 'Farsi']] %} {% for alt_lang, alt_title in alt_langs %} > /etc/apt/sources.list diff --git a/docs/zh/getting_started/example_datasets/metrica.md b/docs/zh/getting_started/example_datasets/metrica.md new file mode 120000 index 00000000000..984023973eb --- /dev/null +++ b/docs/zh/getting_started/example_datasets/metrica.md @@ -0,0 +1 @@ +../../../en/getting_started/example_datasets/metrica.md \ No newline at end of file diff --git a/docs/zh/getting_started/index.md b/docs/zh/getting_started/index.md index b1c94600da0..c73181a6068 100644 --- a/docs/zh/getting_started/index.md +++ b/docs/zh/getting_started/index.md @@ -1,154 +1,10 @@ -# 入门指南 +# 入门 -## 系统要求 +如果您是ClickHouse的新手,并希望亲身体验它的性能,首先您需要通过 [安装过程](install.md). -如果从官方仓库安装,需要确保您使用的是x86\_64处理器构架的Linux并且支持SSE 4.2指令集 +之后,您可以选择以下选项之一: -检查是否支持SSE 4.2: +* [通过详细的教程](tutorial.md) +* [试验示例数据集](example_datasets/ontime.md) -```bash -grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported" -``` - -我们推荐使用Ubuntu或者Debian。终端必须使用UTF-8编码。 - -基于rpm的系统,你可以使用第三方的安装包:https://packagecloud.io/altinity/clickhouse 或者直接安装debian安装包。 - -ClickHouse还可以在FreeBSD与Mac OS X上工作。同时它可以在不支持SSE 4.2的x86\_64构架和AArch64 CPUs上编译。 - -## 安装 - -### 为Debian/Ubuntu安装 - -在`/etc/apt/sources.list` (或创建`/etc/apt/sources.list.d/clickhouse.list`文件)中添加仓库: - -```bash -$ deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ -``` - -如果你想使用最新的测试版本,请使用'testing'替换'stable'。 - -然后运行: - -```bash -$ sudo apt-get install dirmngr # optional -$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional -$ sudo apt-get update -$ sudo apt-get install clickhouse-client clickhouse-server -``` - -你也可以从这里手动下载安装包:。 - -ClickHouse包含访问控制配置,它们位于`users.xml`文件中(与'config.xml'同目录)。 -默认情况下,允许从任何地方使用默认的‘default’用户无密码的访问ClickHouse。参考‘user/default/networks’。 -有关更多信息,请参考"Configuration files"部分。 - -###为CentOS/RedHat安装 - -Yandex ClickHouse团队建议使用官方预编译的`rpm`软件包,用于CentOS,RedHat和所有其他基于rpm的Linux发行版。 - -首先,您需要添加官方存储库: - -```bash -$ sudo yum install yum-utils -$ sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG -$ sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64 -``` - -如果您想使用最新版本,请将`stable`替换为`testing`(建议您在测试环境中使用)。 - -然后运行这些命令以实际安装包: - -```bash -$ sudo yum install clickhouse-server clickhouse-client -``` - -您也可以从此处手动下载和安装软件包:。 - -###使用Docker安装 - -要在Docker中运行ClickHouse,请遵循[Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/)上的指南。这些镜像使用官方的`deb`包构建。 - -### 使用源码安装 - -具体编译方式可以参考build.md。 - -你可以编译并安装它们。 -你也可以直接使用而不进行安装。 - -```text -Client: dbms/programs/clickhouse-client -Server: dbms/programs/clickhouse-server -``` - -在服务器中为数据创建如下目录: - -```text -/opt/clickhouse/data/default/ -/opt/clickhouse/metadata/default/ -``` - -(它们可以在server config中配置。) -为需要的用户运行‘chown’ - -日志的路径可以在server config (src/dbms/programs/server/config.xml)中配置。 - -## 启动 - -可以运行如下命令在后台启动服务: - -```bash -sudo service clickhouse-server start -``` - -可以在`/var/log/clickhouse-server/`目录中查看日志。 - -如果服务没有启动,请检查配置文件 `/etc/clickhouse-server/config.xml`。 - -你也可以在控制台中直接启动服务: - -```bash -clickhouse-server --config-file=/etc/clickhouse-server/config.xml -``` - -在这种情况下,日志将被打印到控制台中,这在开发过程中很方便。 -如果配置文件在当前目录中,你可以不指定‘--config-file’参数。它默认使用‘./config.xml’。 - -你可以使用命令行客户端连接到服务: - -```bash -clickhouse-client -``` - -默认情况下它使用‘default’用户无密码的与localhost:9000服务建立连接。 -客户端也可以用于连接远程服务,例如: - 
-```bash -clickhouse-client --host=example.com -``` - -有关更多信息,请参考"Command-line client"部分。 - -检查系统是否工作: - -```bash -milovidov@hostname:~/work/metrica/src/dbms/src/Client$ ./clickhouse-client -ClickHouse client version 0.0.18749. -Connecting to localhost:9000. -Connected to ClickHouse server version 0.0.18749. -``` -```sql -:) SELECT 1 -``` -```text -┌─1─┐ -│ 1 │ -└───┘ -``` - -**恭喜,系统已经工作了!** - -为了继续进行实验,你可以尝试下载测试数据集。 - - -[Original article](https://clickhouse.yandex/docs/en/getting_started/) +[来源文章](https://clickhouse.yandex/docs/zh/getting_started/) diff --git a/docs/zh/getting_started/install.md b/docs/zh/getting_started/install.md new file mode 100644 index 00000000000..aa3cb816218 --- /dev/null +++ b/docs/zh/getting_started/install.md @@ -0,0 +1,152 @@ +## 系统要求 + +ClickHouse可以在任何具有x86\_64,AArch64或PowerPC64LE CPU架构的Linux,FreeBSD或Mac OS X上运行。 + +虽然预构建的二进制文件通常是为x86 \ _64编译并利用SSE 4.2指令集,但除非另有说明,否则使用支持它的CPU将成为额外的系统要求。这是检查当前CPU是否支持SSE 4.2的命令: + +``` bash +$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported" +``` + +要在不支持SSE 4.2或具有AArch64或PowerPC64LE体系结构的处理器上运行ClickHouse,您应该[通过源构建ClickHouse](#from-sources)进行适当的配置调整。 + +##可用的安装选项 + +### 为Debian/Ubuntu安装 {#from-deb-packages} + +在`/etc/apt/sources.list` (或创建`/etc/apt/sources.list.d/clickhouse.list`文件)中添加仓库: + +```text +deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ +``` + +如果你想使用最新的测试版本,请使用'testing'替换'stable'。 + +然后运行: + +```bash +sudo apt-get install dirmngr # optional +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional +sudo apt-get update +sudo apt-get install clickhouse-client clickhouse-server +``` + +你也可以从这里手动下载安装包:。 + +ClickHouse包含访问控制配置,它们位于`users.xml`文件中(与'config.xml'同目录)。 +默认情况下,允许从任何地方使用默认的‘default’用户无密码的访问ClickHouse。参考‘user/default/networks’。 +有关更多信息,请参考"Configuration files"部分。 + +###来自RPM包 {#from-rpm-packages} + +Yandex ClickHouse团队建议使用官方预编译的`rpm`软件包,用于CentOS,RedHat和所有其他基于rpm的Linux发行版。 + +首先,您需要添加官方存储库: + +```bash +sudo yum install yum-utils +sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG +sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64 +``` + +如果您想使用最新版本,请将`stable`替换为`testing`(建议您在测试环境中使用)。 + +然后运行这些命令以实际安装包: + +```bash +sudo yum install clickhouse-server clickhouse-client +``` + +您也可以从此处手动下载和安装软件包:。 + +###来自Docker {#from-docker-image} + +要在Docker中运行ClickHouse,请遵循[Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/)上的指南。那些图像使用官方的`deb`包。 + +### 使用源码安装 {#from-sources} + +具体编译方式可以参考build.md。 + +你可以编译并安装它们。 +你也可以直接使用而不进行安装。 + +```text +Client: dbms/programs/clickhouse-client +Server: dbms/programs/clickhouse-server +``` + +在服务器中为数据创建如下目录: + +```text +/opt/clickhouse/data/default/ +/opt/clickhouse/metadata/default/ +``` + +(它们可以在server config中配置。) +为需要的用户运行‘chown’ + +日志的路径可以在server config (src/dbms/programs/server/config.xml)中配置。 + +## 启动 + +可以运行如下命令在后台启动服务: + +```bash +sudo service clickhouse-server start +``` + +可以在`/var/log/clickhouse-server/`目录中查看日志。 + +如果服务没有启动,请检查配置文件 `/etc/clickhouse-server/config.xml`。 + +你也可以在控制台中直接启动服务: + +```bash +clickhouse-server --config-file=/etc/clickhouse-server/config.xml +``` + +在这种情况下,日志将被打印到控制台中,这在开发过程中很方便。 +如果配置文件在当前目录中,你可以不指定‘--config-file’参数。它默认使用‘./config.xml’。 + +你可以使用命令行客户端连接到服务: + +```bash +clickhouse-client +``` + +默认情况下它使用‘default’用户无密码的与localhost:9000服务建立连接。 +客户端也可以用于连接远程服务,例如: + +```bash +clickhouse-client --host=example.com +``` + +有关更多信息,请参考"Command-line client"部分。 + +检查系统是否工作: + +```bash 
+milovidov@hostname:~/work/metrica/src/dbms/src/Client$ ./clickhouse-client +ClickHouse client version 0.0.18749. +Connecting to localhost:9000. +Connected to ClickHouse server version 0.0.18749. + +:) SELECT 1 + +SELECT 1 + +┌─1─┐ +│ 1 │ +└───┘ + +1 rows in set. Elapsed: 0.003 sec. + +:) +``` + +**恭喜,系统已经工作了!** + +为了继续进行实验,你可以尝试下载测试数据集。 + + +[Original article](https://clickhouse.yandex/docs/en/getting_started/install/) diff --git a/docs/zh/getting_started/tutorial.md b/docs/zh/getting_started/tutorial.md new file mode 120000 index 00000000000..8bc40816ab2 --- /dev/null +++ b/docs/zh/getting_started/tutorial.md @@ -0,0 +1 @@ +../../en/getting_started/tutorial.md \ No newline at end of file diff --git a/docs/zh/introduction/ya_metrika_task.md b/docs/zh/introduction/history.md similarity index 99% rename from docs/zh/introduction/ya_metrika_task.md rename to docs/zh/introduction/history.md index da4b18826e0..86fe02f84d5 100644 --- a/docs/zh/introduction/ya_metrika_task.md +++ b/docs/zh/introduction/history.md @@ -1,4 +1,4 @@ -# Yandex.Metrica的使用案例 +# ClickHouse历史 ClickHouse最初是为 [Yandex.Metrica](https://metrica.yandex.com/) [世界第二大Web分析平台](http://w3techs.com/technologies/overview/traffic_analysis/all) 而开发的。多年来一直作为该系统的核心组件被该系统持续使用着。目前为止,该系统在ClickHouse中有超过13万亿条记录,并且每天超过200多亿个事件被处理。它允许直接从原始数据中动态查询并生成报告。本文简要介绍了ClickHouse在其早期发展阶段的目标。 diff --git a/docs/zh/operations/monitoring.md b/docs/zh/operations/monitoring.md index cf51086f295..5ad0a1846a2 100644 --- a/docs/zh/operations/monitoring.md +++ b/docs/zh/operations/monitoring.md @@ -34,4 +34,4 @@ ClickHouse 收集的指标项: 此外,您可以通过HTTP API监视服务器可用性。 将HTTP GET请求发送到 `/`。 如果服务器可用,它将以 `200 OK` 响应。 -要监视服务器集群的配置中,应设置[max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries)参数并使用HTTP资源`/replicas-delay`。 如果副本可用,并且不延迟在其他副本之后,则对`/replicas-delay`的请求将返回200 OK。 如果副本被延迟,它将返回有关延迟信息。 +要监视服务器集群的配置中,应设置[max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries)参数并使用HTTP资源`/replicas_status`。 如果副本可用,并且不延迟在其他副本之后,则对`/replicas_status`的请求将返回200 OK。 如果副本被延迟,它将返回有关延迟信息。 diff --git a/docs/zh/query_language/functions/higher_order_functions.md b/docs/zh/query_language/functions/higher_order_functions.md index e64db0bc8d3..39c6770e5b8 100644 --- a/docs/zh/query_language/functions/higher_order_functions.md +++ b/docs/zh/query_language/functions/higher_order_functions.md @@ -12,7 +12,7 @@ 除了'arrayMap'和'arrayFilter'以外的所有其他函数,都可以省略第一个参数(lambda函数)。在这种情况下,默认返回数组元素本身。 -### arrayMap(func, arr1, ...) +### arrayMap(func, arr1, ...) 
{#higher_order_functions-array-map} 将arr 将从'func'函数的原始应用程序获得的数组返回到'arr'数组中的每个元素。 diff --git a/docs/zh/query_language/functions/introspection.md b/docs/zh/query_language/functions/introspection.md new file mode 120000 index 00000000000..b1a487e9c77 --- /dev/null +++ b/docs/zh/query_language/functions/introspection.md @@ -0,0 +1 @@ +../../../en/query_language/functions/introspection.md \ No newline at end of file diff --git a/libs/libcommon/CMakeLists.txt b/libs/libcommon/CMakeLists.txt index 357e457b240..3e58cba0164 100644 --- a/libs/libcommon/CMakeLists.txt +++ b/libs/libcommon/CMakeLists.txt @@ -53,6 +53,7 @@ add_library (common include/common/phdr_cache.h include/ext/bit_cast.h + include/ext/chrono_io.h include/ext/collection_cast.h include/ext/enumerate.h include/ext/function_traits.h diff --git a/libs/libcommon/include/ext/chrono_io.h b/libs/libcommon/include/ext/chrono_io.h new file mode 100644 index 00000000000..8fa448b9e6a --- /dev/null +++ b/libs/libcommon/include/ext/chrono_io.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include + + +namespace ext +{ + template + std::string to_string(const std::chrono::time_point & tp) + { + return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp)); + } + + template > + std::string to_string(const std::chrono::duration & dur) + { + auto seconds_as_int = std::chrono::duration_cast(dur); + if (seconds_as_int == dur) + return std::to_string(seconds_as_int.count()) + "s"; + auto seconds_as_double = std::chrono::duration_cast>(dur); + return std::to_string(seconds_as_double.count()) + "s"; + } + + template + std::ostream & operator<<(std::ostream & o, const std::chrono::time_point & tp) + { + return o << to_string(tp); + } + + template > + std::ostream & operator<<(std::ostream & o, const std::chrono::duration & dur) + { + return o << to_string(dur); + } +} diff --git a/libs/libcommon/include/ext/range.h b/libs/libcommon/include/ext/range.h index 61b644c2ce5..c379d453f7b 100644 --- a/libs/libcommon/include/ext/range.h +++ b/libs/libcommon/include/ext/range.h @@ -1,46 +1,42 @@ #pragma once #include -#include -#include -#include +#include +#include -/** Numeric range iterator, used to represent a half-closed interval [begin, end). - * In conjunction with std::reverse_iterator allows for forward and backward iteration - * over corresponding interval. - */ namespace ext { - template - using range_iterator = boost::counting_iterator; - - /** Range-based for loop adapter for (reverse_)range_iterator. - * By and large should be in conjunction with ext::range and ext::reverse_range. - */ - template - struct range_wrapper + /// For loop adaptor which is used to iterate through a half-closed interval [begin, end). + template + inline auto range(BeginType begin, EndType end) { - using value_type = typename std::remove_reference::type; - using iterator = range_iterator; + using CommonType = typename std::common_type::type; + return boost::counting_range(begin, end); + } - value_type begin_; - value_type end_; - - iterator begin() const { return iterator(begin_); } - iterator end() const { return iterator(end_); } - }; - - /** Constructs range_wrapper for forward-iteration over [begin, end) in range-based for loop. 
- * Usage example: - * for (const auto i : ext::range(0, 4)) print(i); - * Output: - * 0 1 2 3 - */ - template - inline range_wrapper::type> range(T1 begin, T2 end) + template + inline auto range(Type end) { - using common_type = typename std::common_type::type; - return { static_cast(begin), static_cast(end) }; + return range(static_cast(0), end); + } + + /// The same as range(), but every value is casted statically to a specified `ValueType`. + /// This is useful to iterate through all constants of a enum. + template + inline auto range_with_static_cast(BeginType begin, EndType end) + { + using CommonType = typename std::common_type::type; + if constexpr (std::is_same_v) + return boost::counting_range(begin, end); + else + return boost::counting_range(begin, end) + | boost::adaptors::transformed([](CommonType x) -> ValueType { return static_cast(x); }); + } + + template + inline auto range_with_static_cast(EndType end) + { + return range_with_static_cast(static_cast(0), end); } } diff --git a/libs/libcommon/include/ext/shared_ptr_helper.h b/libs/libcommon/include/ext/shared_ptr_helper.h index ca7219e6261..df132382fa6 100644 --- a/libs/libcommon/include/ext/shared_ptr_helper.h +++ b/libs/libcommon/include/ext/shared_ptr_helper.h @@ -20,4 +20,20 @@ struct shared_ptr_helper } }; + +template +struct is_shared_ptr +{ + static constexpr bool value = false; +}; + + +template +struct is_shared_ptr> +{ + static constexpr bool value = true; +}; + +template +inline constexpr bool is_shared_ptr_v = is_shared_ptr::value; } diff --git a/libs/libdaemon/src/BaseDaemon.cpp b/libs/libdaemon/src/BaseDaemon.cpp index 931d91bd8b5..15b61c9b454 100644 --- a/libs/libdaemon/src/BaseDaemon.cpp +++ b/libs/libdaemon/src/BaseDaemon.cpp @@ -110,7 +110,7 @@ static void faultSignalHandler(int sig, siginfo_t * info, void * context) out.next(); - if (sig != SIGPROF) /// This signal is used for debugging. + if (sig != SIGTSTP) /// This signal is used for debugging. { /// The time that is usually enough for separate thread to print info into log. ::sleep(10); @@ -719,9 +719,9 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() } }; - /// SIGPROF is added for debugging purposes. To output a stack trace of any running thread at anytime. + /// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime. 
- add_signal_handler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGPROF}, faultSignalHandler); + add_signal_handler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP}, faultSignalHandler); add_signal_handler({SIGHUP, SIGUSR1}, closeLogsSignalHandler); add_signal_handler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler); @@ -731,7 +731,6 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() signal_listener.reset(new SignalListener(*this)); signal_listener_thread.start(*signal_listener); - } void BaseDaemon::logRevision() const @@ -891,4 +890,3 @@ void BaseDaemon::waitForTerminationRequest() std::unique_lock lock(signal_handler_mutex); signal_event.wait(lock, [this](){ return terminate_signals_counter > 0; }); } - diff --git a/libs/libglibc-compatibility/CMakeLists.txt b/libs/libglibc-compatibility/CMakeLists.txt index 2dbec5fa772..1b40cd4f4cc 100644 --- a/libs/libglibc-compatibility/CMakeLists.txt +++ b/libs/libglibc-compatibility/CMakeLists.txt @@ -31,6 +31,9 @@ if (GLIBC_COMPATIBILITY) list(APPEND glibc_compatibility_sources libcxxabi/cxa_thread_atexit.cpp) endif() + # Need to omit frame pointers to match the performance of glibc + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer") + add_library(glibc-compatibility STATIC ${glibc_compatibility_sources}) target_include_directories(glibc-compatibility PRIVATE libcxxabi ${musl_arch_include_dir}) diff --git a/libs/libglibc-compatibility/musl/aarch64/syscall_arch.h b/libs/libglibc-compatibility/musl/aarch64/syscall_arch.h new file mode 100644 index 00000000000..0588c15484c --- /dev/null +++ b/libs/libglibc-compatibility/musl/aarch64/syscall_arch.h @@ -0,0 +1,3 @@ +#define VDSO_USEFUL +#define VDSO_CGT_SYM "__kernel_clock_gettime" +#define VDSO_CGT_VER "LINUX_2.6.39" diff --git a/libs/libglibc-compatibility/musl/clock_gettime.c b/libs/libglibc-compatibility/musl/clock_gettime.c new file mode 100644 index 00000000000..574f9b83d15 --- /dev/null +++ b/libs/libglibc-compatibility/musl/clock_gettime.c @@ -0,0 +1,108 @@ +#include +#include +#include +#include "atomic.h" +#include "musl_features.h" +#include "syscall.h" + +#ifdef VDSO_CGT_SYM + +static void *volatile vdso_func; + +#ifdef VDSO_CGT32_SYM +static void *volatile vdso_func_32; +static int cgt_time32_wrap(clockid_t clk, struct timespec *ts) +{ + long ts32[2]; + int (*f)(clockid_t, long[2]) = + (int (*)(clockid_t, long[2]))vdso_func_32; + int r = f(clk, ts32); + if (!r) { + /* Fallback to syscalls if time32 overflowed. Maybe + * we lucked out and somehow migrated to a kernel with + * time64 syscalls available. */ + if (ts32[0] < 0) { + a_cas_p(&vdso_func, (void *)cgt_time32_wrap, 0); + return -ENOSYS; + } + ts->tv_sec = ts32[0]; + ts->tv_nsec = ts32[1]; + } + return r; +} +#endif + +static int cgt_init(clockid_t clk, struct timespec *ts) +{ + void *p = __vdsosym(VDSO_CGT_VER, VDSO_CGT_SYM); +#ifdef VDSO_CGT32_SYM + if (!p) { + void *q = __vdsosym(VDSO_CGT32_VER, VDSO_CGT32_SYM); + if (q) { + a_cas_p(&vdso_func_32, 0, q); + p = cgt_time32_wrap; + } + } +#endif + int (*f)(clockid_t, struct timespec *) = + (int (*)(clockid_t, struct timespec *))p; + a_cas_p(&vdso_func, (void *)cgt_init, p); + return f ? 
f(clk, ts) : -ENOSYS; +} + +static void *volatile vdso_func = (void *)cgt_init; + +#endif + +int __clock_gettime(clockid_t clk, struct timespec *ts) +{ + int r; + +#ifdef VDSO_CGT_SYM + int (*f)(clockid_t, struct timespec *) = + (int (*)(clockid_t, struct timespec *))vdso_func; + if (f) { + r = f(clk, ts); + if (!r) return r; + if (r == -EINVAL) return __syscall_ret(r); + /* Fall through on errors other than EINVAL. Some buggy + * vdso implementations return ENOSYS for clocks they + * can't handle, rather than making the syscall. This + * also handles the case where cgt_init fails to find + * a vdso function to use. */ + } +#endif + +#ifdef SYS_clock_gettime64 + r = -ENOSYS; + if (sizeof(time_t) > 4) + r = __syscall(SYS_clock_gettime64, clk, ts); + if (SYS_clock_gettime == SYS_clock_gettime64 || r!=-ENOSYS) + return __syscall_ret(r); + long ts32[2]; + r = __syscall(SYS_clock_gettime, clk, ts32); + if (r==-ENOSYS && clk==CLOCK_REALTIME) { + r = __syscall(SYS_gettimeofday, ts32, 0); + ts32[1] *= 1000; + } + if (!r) { + ts->tv_sec = ts32[0]; + ts->tv_nsec = ts32[1]; + return r; + } + return __syscall_ret(r); +#else + r = __syscall(SYS_clock_gettime, clk, ts); + if (r == -ENOSYS) { + if (clk == CLOCK_REALTIME) { + __syscall(SYS_gettimeofday, ts, 0); + ts->tv_nsec = (int)ts->tv_nsec * 1000; + return 0; + } + r = -EINVAL; + } + return __syscall_ret(r); +#endif +} + +weak_alias(__clock_gettime, clock_gettime); diff --git a/libs/libglibc-compatibility/musl/clock_nanosleep.c b/libs/libglibc-compatibility/musl/clock_nanosleep.c new file mode 100644 index 00000000000..bf71a5e84ac --- /dev/null +++ b/libs/libglibc-compatibility/musl/clock_nanosleep.c @@ -0,0 +1,27 @@ +#include +#include +#include +#include "musl_features.h" +#include "syscall.h" + +int __clock_nanosleep(clockid_t clk, int flags, const struct timespec * req, struct timespec * rem) +{ + if (clk == CLOCK_THREAD_CPUTIME_ID) + return EINVAL; + int old_cancel_type; + int status; + /// We cannot port __syscall_cp because musl has very limited cancellation point implementation. + /// For example, c++ destructors won't get called and exception unwinding isn't implemented. + /// Instead, we use normal __syscall here and turn on the asynchrous cancel mode to allow + /// cancel. This works because nanosleep doesn't contain any resource allocations or + /// deallocations. 
+ pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_cancel_type); + if (clk == CLOCK_REALTIME && !flags) + status = -__syscall(SYS_nanosleep, req, rem); + else + status = -__syscall(SYS_clock_nanosleep, clk, flags, req, rem); + pthread_setcanceltype(old_cancel_type, NULL); + return status; +} + +weak_alias(__clock_nanosleep, clock_nanosleep); diff --git a/libs/libglibc-compatibility/musl/sched_getcpu.c b/libs/libglibc-compatibility/musl/sched_getcpu.c index d0e171dac95..4ec5eaf6796 100644 --- a/libs/libglibc-compatibility/musl/sched_getcpu.c +++ b/libs/libglibc-compatibility/musl/sched_getcpu.c @@ -1,14 +1,11 @@ #define _GNU_SOURCE #include #include -#include #include "syscall.h" #include "atomic.h" #ifdef VDSO_GETCPU_SYM -void *__vdsosym(const char *, const char *); - static void *volatile vdso_func; typedef long (*getcpu_f)(unsigned *, unsigned *, void *); diff --git a/libs/libglibc-compatibility/musl/syscall.h b/libs/libglibc-compatibility/musl/syscall.h index 49edd7244d3..70b4688f642 100644 --- a/libs/libglibc-compatibility/musl/syscall.h +++ b/libs/libglibc-compatibility/musl/syscall.h @@ -1,5 +1,8 @@ #pragma once +#include +#include + typedef long syscall_arg_t; __attribute__((visibility("hidden"))) @@ -7,3 +10,6 @@ long __syscall_ret(unsigned long); __attribute__((visibility("hidden"))) long __syscall(syscall_arg_t, ...); + +__attribute__((visibility("hidden"))) +void *__vdsosym(const char *, const char *); diff --git a/libs/libglibc-compatibility/musl/vdso.c b/libs/libglibc-compatibility/musl/vdso.c new file mode 100644 index 00000000000..c0dd0f33e4e --- /dev/null +++ b/libs/libglibc-compatibility/musl/vdso.c @@ -0,0 +1,105 @@ +#include +#include +#include +#include +#include +#include +#include "syscall.h" + +#ifdef VDSO_USEFUL + +#if ULONG_MAX == 0xffffffff +typedef Elf32_Ehdr Ehdr; +typedef Elf32_Phdr Phdr; +typedef Elf32_Sym Sym; +typedef Elf32_Verdef Verdef; +typedef Elf32_Verdaux Verdaux; +#else +typedef Elf64_Ehdr Ehdr; +typedef Elf64_Phdr Phdr; +typedef Elf64_Sym Sym; +typedef Elf64_Verdef Verdef; +typedef Elf64_Verdaux Verdaux; +#endif + +static int checkver(Verdef *def, int vsym, const char *vername, char *strings) +{ + vsym &= 0x7fff; + for (;;) { + if (!(def->vd_flags & VER_FLG_BASE) + && (def->vd_ndx & 0x7fff) == vsym) + break; + if (def->vd_next == 0) + return 0; + def = (Verdef *)((char *)def + def->vd_next); + } + Verdaux *aux = (Verdaux *)((char *)def + def->vd_aux); + return !strcmp(vername, strings + aux->vda_name); +} + +#define OK_TYPES (1<e_phoff); + size_t *dynv=0, base=-1; + for (i=0; ie_phnum; i++, ph=(void *)((char *)ph+eh->e_phentsize)) { + if (ph->p_type == PT_LOAD) + base = (size_t)eh + ph->p_offset - ph->p_vaddr; + else if (ph->p_type == PT_DYNAMIC) + dynv = (void *)((char *)eh + ph->p_offset); + } + if (!dynv || base==(size_t)-1) return 0; + + char *strings = 0; + Sym *syms = 0; + Elf_Symndx *hashtab = 0; + uint16_t *versym = 0; + Verdef *verdef = 0; + + for (i=0; dynv[i]; i+=2) { + void *p = (void *)(base + dynv[i+1]); + switch(dynv[i]) { + case DT_STRTAB: strings = p; break; + case DT_SYMTAB: syms = p; break; + case DT_HASH: hashtab = p; break; + case DT_VERSYM: versym = p; break; + case DT_VERDEF: verdef = p; break; + } + } + + if (!strings || !syms || !hashtab) return 0; + if (!verdef) versym = 0; + + for (i=0; i>4) & OK_BINDS)) continue; + if (!syms[i].st_shndx) continue; + if (strcmp(name, strings+syms[i].st_name)) continue; + if (versym && !checkver(verdef, versym[i], vername, strings)) + continue; + return (void *)(base + 
syms[i].st_value); + } + + return 0; +} + +#endif diff --git a/libs/libglibc-compatibility/musl/x86_64/syscall_arch.h b/libs/libglibc-compatibility/musl/x86_64/syscall_arch.h new file mode 100644 index 00000000000..b20e812aa1a --- /dev/null +++ b/libs/libglibc-compatibility/musl/x86_64/syscall_arch.h @@ -0,0 +1,5 @@ +#define VDSO_USEFUL +#define VDSO_CGT_SYM "__vdso_clock_gettime" +#define VDSO_CGT_VER "LINUX_2.6" +#define VDSO_GETCPU_SYM "__vdso_getcpu" +#define VDSO_GETCPU_VER "LINUX_2.6" diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index b3df25d13e6..fcf56e82b52 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -21,7 +21,6 @@ if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS) add_subdirectory (corrector_utf8) add_subdirectory (zookeeper-cli) add_subdirectory (zookeeper-dump-tree) - add_subdirectory (zookeeper-copy-tree) add_subdirectory (zookeeper-remove-by-list) add_subdirectory (zookeeper-create-entry-to-download-part) add_subdirectory (zookeeper-adjust-block-numbers-to-parts) diff --git a/utils/zookeeper-copy-tree/CMakeLists.txt b/utils/zookeeper-copy-tree/CMakeLists.txt deleted file mode 100644 index c4dc88d700c..00000000000 --- a/utils/zookeeper-copy-tree/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_executable (zookeeper-copy-tree main.cpp ${SRCS}) -target_link_libraries(zookeeper-copy-tree PRIVATE clickhouse_common_zookeeper clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY}) diff --git a/utils/zookeeper-copy-tree/main.cpp b/utils/zookeeper-copy-tree/main.cpp deleted file mode 100644 index 7bc7316b4af..00000000000 --- a/utils/zookeeper-copy-tree/main.cpp +++ /dev/null @@ -1,149 +0,0 @@ -#include -#include -#include - -#include - -#include - -namespace DB -{ -namespace ErrorCodes -{ - -extern const int UNEXPECTED_NODE_IN_ZOOKEEPER; - -} -} - -int main(int argc, char ** argv) -try -{ - boost::program_options::options_description desc("Allowed options"); - desc.add_options() - ("help,h", "produce help message") - ("from", boost::program_options::value()->required(), - "addresses of source ZooKeeper instances, comma separated. Example: example01e.yandex.ru:2181") - ("from-path", boost::program_options::value()->required(), - "where to copy from") - ("to", boost::program_options::value()->required(), - "addresses of destination ZooKeeper instances, comma separated. Example: example01e.yandex.ru:2181") - ("to-path", boost::program_options::value()->required(), - "where to copy to") - ; - - boost::program_options::variables_map options; - boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options); - - if (options.count("help")) - { - std::cout << "Copy a ZooKeeper tree to another cluster." 
<< std::endl; - std::cout << "Usage: " << argv[0] << " [options]" << std::endl; - std::cout << "WARNING: it is almost useless as it is impossible to corretly copy sequential nodes" << std::endl; - std::cout << desc << std::endl; - return 1; - } - - zkutil::ZooKeeper from_zookeeper(options.at("from").as()); - zkutil::ZooKeeper to_zookeeper(options.at("to").as()); - - std::string from_path = options.at("from-path").as(); - std::string to_path = options.at("to-path").as(); - - if (to_zookeeper.exists(to_path)) - throw DB::Exception("Destination path: " + to_path + " already exists, aborting.", - DB::ErrorCodes::UNEXPECTED_NODE_IN_ZOOKEEPER); - - struct Node - { - Node( - std::string path_, - std::future get_future_, - std::future children_future_, - Node * parent_) - : path(std::move(path_)) - , get_future(std::move(get_future_)) - , children_future(std::move(children_future_)) - , parent(parent_) - { - } - - std::string path; - std::future get_future; - std::future children_future; - - Node * parent = nullptr; - std::future create_future; - bool created = false; - bool deleted = false; - bool ephemeral = false; - }; - - std::list nodes_queue; - nodes_queue.emplace_back( - from_path, from_zookeeper.asyncGet(from_path), from_zookeeper.asyncGetChildren(from_path), nullptr); - - to_zookeeper.createAncestors(to_path); - - for (auto it = nodes_queue.begin(); it != nodes_queue.end(); ++it) - { - Coordination::GetResponse get_response; - Coordination::ListResponse children_response; - try - { - get_response = it->get_future.get(); - children_response = it->children_future.get(); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::ZNONODE) - { - it->deleted = true; - continue; - } - throw; - } - - if (get_response.stat.ephemeralOwner) - { - it->ephemeral = true; - continue; - } - - if (it->parent && !it->parent->created) - { - it->parent->create_future.get(); - it->parent->created = true; - std::cerr << it->parent->path << " copied!" << std::endl; - } - - std::string new_path = it->path; - new_path.replace(0, from_path.length(), to_path); - it->create_future = to_zookeeper.asyncCreate(new_path, get_response.data, zkutil::CreateMode::Persistent); - get_response.data.clear(); - get_response.data.shrink_to_fit(); - - for (const auto & name : children_response.names) - { - std::string child_path = it->path == "/" ? it->path + name : it->path + '/' + name; - nodes_queue.emplace_back( - child_path, from_zookeeper.asyncGet(child_path), from_zookeeper.asyncGetChildren(child_path), - &(*it)); - } - } - - for (auto it = nodes_queue.begin(); it != nodes_queue.end(); ++it) - { - if (!it->created && !it->deleted && !it->ephemeral) - { - it->create_future.get(); - it->created = true; - std::cerr << it->path << " copied!" << std::endl; - } - } -} -catch (...) 
-{ - std::cerr << DB::getCurrentExceptionMessage(true) << '\n'; - throw; -} diff --git a/website/nginx/default.conf b/website/nginx/default.conf index 98edad41055..fc029323fe2 100644 --- a/website/nginx/default.conf +++ b/website/nginx/default.conf @@ -14,6 +14,8 @@ server { } rewrite ^/docs/$ https://clickhouse.yandex/docs/en/ permanent; + rewrite ^/tutorial.html$ https://clickhouse.yandex/docs/en/getting_started/tutorial/ permanent; + rewrite ^/presentations/(.*)$ https://clickhouse.github.io/clickhouse-presentations/$1 permanent; rewrite ^/reference_en.html$ https://clickhouse.yandex/docs/en/single/ permanent; rewrite ^/reference_ru.html$ https://clickhouse.yandex/docs/ru/single/ permanent; rewrite ^/presentations/(.*)$ https://clickhouse.github.io/clickhouse-presentations/$1 permanent; diff --git a/website/robots.txt b/website/robots.txt index db843cdbf06..82708ceea95 100644 --- a/website/robots.txt +++ b/website/robots.txt @@ -2,16 +2,16 @@ User-agent: * Disallow: /docs/en/single/ Disallow: /docs/ru/single/ Disallow: /docs/zh/single/ +Disallow: /docs/ja/single/ Disallow: /docs/fa/single/ Disallow: /docs/v1* Disallow: /docs/v2* Disallow: /docs/v3* Disallow: /docs/en/search.html Disallow: /docs/ru/search.html -Disallow: /docs/fa/search.html +Disallow: /docs/ja/search.html Disallow: /docs/zh/search.html -Disallow: /deprecated/reference_en.html -Disallow: /deprecated/reference_ru.html +Disallow: /docs/fa/search.html Allow: / Host: https://clickhouse.yandex Sitemap: https://clickhouse.yandex/docs/sitemap.xml diff --git a/website/sitemap.xml b/website/sitemap.xml index db7bd695b58..e9319dc8701 100644 --- a/website/sitemap.xml +++ b/website/sitemap.xml @@ -9,6 +9,9 @@ https://clickhouse.yandex/docs/zh/sitemap.xml + + https://clickhouse.yandex/docs/ja/sitemap.xml + https://clickhouse.yandex/docs/fa/sitemap.xml diff --git a/website/tutorial.html b/website/tutorial.html deleted file mode 100644 index 52216f61dc8..00000000000 --- a/website/tutorial.html +++ /dev/null @@ -1,649 +0,0 @@ - - - - - ClickHouse Quick Start Guide - - - - - - - - - - -
ClickHouse Tutorial
Let's get started with a sample dataset from open sources. We will use USA civil flight data from 1987 through 2015. It's hard to call this sample Big Data (it contains 166 million rows, 63 GB of uncompressed data), but it lets us get to work quickly. The dataset is available for download here. You may also download it from the original datasource as described here.
First we will deploy ClickHouse on a single server. Further below we will also review the process of deploying to a cluster with support for sharding and replication.
On Ubuntu and Debian Linux, ClickHouse can be installed from packages. For other Linux distributions you can compile ClickHouse from sources and then install it.
The clickhouse-client package contains the clickhouse-client application, an interactive ClickHouse client. clickhouse-common contains the clickhouse-server binary file. clickhouse-server contains config files for the clickhouse-server.
Server config files are located in /etc/clickhouse-server/. Before getting to work, please notice the path element in the config: it determines the location for data storage. It's not really handy to edit config.xml directly, considering package updates; the recommended way is to override config elements in files of the config.d directory. You may also want to set up access rights at the start.
clickhouse-server won't be launched automatically after package installation, and it won't be automatically restarted after updates either. Start the server with:
sudo service clickhouse-server start
The default location for server logs is /var/log/clickhouse-server/. The server is ready to handle client connections once the "Ready for connections" message has been logged.

Use clickhouse-client to connect to the server.

- -
Tips for clickhouse-client -
- Interactive mode: -
-clickhouse-client
-clickhouse-client --host=... --port=... --user=... --password=...
-
- Enable multiline queries: -
-clickhouse-client -m
-clickhouse-client --multiline
-
- Run queries in batch-mode: -
-clickhouse-client --query='SELECT 1'
-echo 'SELECT 1' | clickhouse-client
-
- Insert data from a file in a specified format: -
-clickhouse-client --query='INSERT INTO table VALUES' < data.txt
-clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
-
-
-
- -

Create a table for the sample dataset

-
Create table query -
-
-$ clickhouse-client --multiline
-ClickHouse client version 0.0.53720.
-Connecting to localhost:9000.
-Connected to ClickHouse server version 0.0.53720.
-
-:) CREATE TABLE ontime
-(
-    Year UInt16,
-    Quarter UInt8,
-    Month UInt8,
-    DayofMonth UInt8,
-    DayOfWeek UInt8,
-    FlightDate Date,
-    UniqueCarrier FixedString(7),
-    AirlineID Int32,
-    Carrier FixedString(2),
-    TailNum String,
-    FlightNum String,
-    OriginAirportID Int32,
-    OriginAirportSeqID Int32,
-    OriginCityMarketID Int32,
-    Origin FixedString(5),
-    OriginCityName String,
-    OriginState FixedString(2),
-    OriginStateFips String,
-    OriginStateName String,
-    OriginWac Int32,
-    DestAirportID Int32,
-    DestAirportSeqID Int32,
-    DestCityMarketID Int32,
-    Dest FixedString(5),
-    DestCityName String,
-    DestState FixedString(2),
-    DestStateFips String,
-    DestStateName String,
-    DestWac Int32,
-    CRSDepTime Int32,
-    DepTime Int32,
-    DepDelay Int32,
-    DepDelayMinutes Int32,
-    DepDel15 Int32,
-    DepartureDelayGroups String,
-    DepTimeBlk String,
-    TaxiOut Int32,
-    WheelsOff Int32,
-    WheelsOn Int32,
-    TaxiIn Int32,
-    CRSArrTime Int32,
-    ArrTime Int32,
-    ArrDelay Int32,
-    ArrDelayMinutes Int32,
-    ArrDel15 Int32,
-    ArrivalDelayGroups Int32,
-    ArrTimeBlk String,
-    Cancelled UInt8,
-    CancellationCode FixedString(1),
-    Diverted UInt8,
-    CRSElapsedTime Int32,
-    ActualElapsedTime Int32,
-    AirTime Int32,
-    Flights Int32,
-    Distance Int32,
-    DistanceGroup UInt8,
-    CarrierDelay Int32,
-    WeatherDelay Int32,
-    NASDelay Int32,
-    SecurityDelay Int32,
-    LateAircraftDelay Int32,
-    FirstDepTime String,
-    TotalAddGTime String,
-    LongestAddGTime String,
-    DivAirportLandings String,
-    DivReachedDest String,
-    DivActualElapsedTime String,
-    DivArrDelay String,
-    DivDistance String,
-    Div1Airport String,
-    Div1AirportID Int32,
-    Div1AirportSeqID Int32,
-    Div1WheelsOn String,
-    Div1TotalGTime String,
-    Div1LongestGTime String,
-    Div1WheelsOff String,
-    Div1TailNum String,
-    Div2Airport String,
-    Div2AirportID Int32,
-    Div2AirportSeqID Int32,
-    Div2WheelsOn String,
-    Div2TotalGTime String,
-    Div2LongestGTime String,
-    Div2WheelsOff String,
-    Div2TailNum String,
-    Div3Airport String,
-    Div3AirportID Int32,
-    Div3AirportSeqID Int32,
-    Div3WheelsOn String,
-    Div3TotalGTime String,
-    Div3LongestGTime String,
-    Div3WheelsOff String,
-    Div3TailNum String,
-    Div4Airport String,
-    Div4AirportID Int32,
-    Div4AirportSeqID Int32,
-    Div4WheelsOn String,
-    Div4TotalGTime String,
-    Div4LongestGTime String,
-    Div4WheelsOff String,
-    Div4TailNum String,
-    Div5Airport String,
-    Div5AirportID Int32,
-    Div5AirportSeqID Int32,
-    Div5WheelsOn String,
-    Div5TotalGTime String,
-    Div5LongestGTime String,
-    Div5WheelsOff String,
-    Div5TailNum String
-)
-ENGINE = MergeTree(FlightDate, (Year, FlightDate), 8192);
-
-
-
- -
Now we have a table of the MergeTree type. MergeTree is the table engine recommended for production use. A table of this kind has a primary key used for incremental sorting of the table data, which allows fast execution of queries over ranges of the primary key.
Note: we store ad network banner impression logs in ClickHouse. Each table entry looks like [Advertiser ID, Impression ID, attribute1, attribute2, …]. Let's assume that our aim is to provide a set of reports for each advertiser. A common and frequently demanded query would be to count impressions for a specific Advertiser ID, so the table's primary key should start with Advertiser ID. In that case ClickHouse needs to read a smaller amount of data to perform the query for a given Advertiser ID.
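A minimal sketch of that note, assuming a simplified, hypothetical schema (the table and column names here are illustrative, not the actual production ones), written in the same legacy MergeTree syntax this guide uses:

```sql
-- Hypothetical ad-impression table: the primary key starts with AdvertiserID,
-- so each advertiser's rows are stored together.
CREATE TABLE impressions
(
    EventDate Date,
    AdvertiserID UInt32,
    ImpressionID UInt64,
    Attribute1 String,
    Attribute2 String
) ENGINE = MergeTree(EventDate, (AdvertiserID, EventDate), 8192);

-- Reads only one advertiser's primary-key range instead of the whole table.
SELECT count() FROM impressions WHERE AdvertiserID = 123;
```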

Load data

-
xz -v -c -d < ontime.csv.xz | clickhouse-client --query="INSERT INTO ontime FORMAT CSV"
-
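While the load runs (or once it finishes), you can sanity-check it with a simple count; the result should approach the 166 million rows mentioned above:

```sql
SELECT count() FROM ontime;
```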
The ClickHouse INSERT query allows loading data in any supported format. Data loading requires just O(1) RAM. An INSERT query can receive any volume of data as input. It's strongly recommended not to insert data in blocks that are too small. Notice that inserting a block of up to max_insert_block_size rows (1,048,576 by default) is an atomic operation: the data block is either inserted completely or not inserted at all. In case of a disconnect during the insert operation you may not know whether the block was inserted successfully. To achieve exactly-once semantics, ClickHouse supports idempotency for replicated tables: you may retry inserting the same data block (possibly on a different replica), and it will be inserted just once. In this guide, though, we load data from localhost, so we don't need to worry about block generation and exactly-once semantics.
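You can check the effective block-size threshold on your own server; system.settings is a standard ClickHouse system table:

```sql
SELECT name, value FROM system.settings WHERE name = 'max_insert_block_size';
```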
An INSERT query into a table of the MergeTree type is non-blocking (and so is a SELECT query). You can execute SELECT queries right after, or even during, an insert operation.
Our sample dataset is not quite optimal, for two reasons.
The first is that the String data type is used in cases where an Enum or a numeric type would fit better.
When the set of possible values is determinate and known to be small (e.g. OS names, browser vendors, etc.), it's recommended to use Enums or numbers to improve performance. When the set of possible values is unbounded (search queries, URLs, etc.), just go ahead with String.
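As an illustrative sketch (not part of the original schema, and with a deliberately truncated set of carrier codes), Carrier could be declared as an Enum instead of a string type:

```sql
-- Enum values are stored as numbers but compared and displayed as strings.
CREATE TABLE ontime_enum_example
(
    FlightDate Date,
    Carrier Enum8('AA' = 1, 'DL' = 2, 'UA' = 3, 'WN' = 4)
) ENGINE = MergeTree(FlightDate, (Carrier, FlightDate), 8192);
```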
The second is that the dataset contains redundant fields like Year, Quarter, Month, DayofMonth and DayOfWeek. In fact, a single FlightDate would be enough. Most likely they have been added to improve performance for other DBMSes whose date and time handling functions may not be efficient.
ClickHouse functions for operating with DateTime fields are well-optimized, so such redundancy is not required. In any case, a large number of columns is not a reason to worry: ClickHouse is a column-oriented DBMS, so you can have as many fields as you need. Hundreds of columns in a table are fine for ClickHouse.
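For instance, each of the redundant fields can be derived from FlightDate on the fly; these are standard ClickHouse date functions:

```sql
SELECT
    toYear(FlightDate) AS Year,
    toQuarter(FlightDate) AS Quarter,
    toMonth(FlightDate) AS Month,
    toDayOfMonth(FlightDate) AS DayofMonth,
    toDayOfWeek(FlightDate) AS DayOfWeek
FROM ontime
LIMIT 5;
```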

Querying the sample dataset

- -
Here are some example queries over the test data.
  • the most popular destinations in 2015:
    -SELECT
    -    OriginCityName,
    -    DestCityName,
    -    count(*) AS flights,
    -    bar(flights, 0, 20000, 40)
    -FROM ontime WHERE Year = 2015 GROUP BY OriginCityName, DestCityName ORDER BY flights DESC LIMIT 20
    or, counting each pair of cities regardless of direction:
    -SELECT
    -    OriginCityName < DestCityName ? OriginCityName : DestCityName AS a,
    -    OriginCityName < DestCityName ? DestCityName : OriginCityName AS b,
    -    count(*) AS flights,
    -    bar(flights, 0, 40000, 40)
    -FROM ontime WHERE Year = 2015 GROUP BY a, b ORDER BY flights DESC LIMIT 20
  • the most popular cities of departure:
    -SELECT OriginCityName, count(*) AS flights
    -FROM ontime GROUP BY OriginCityName ORDER BY flights DESC LIMIT 20
  • cities of departure which offer the maximum variety of destinations:
    -SELECT OriginCityName, uniq(Dest) AS u
    -FROM ontime GROUP BY OriginCityName ORDER BY u DESC LIMIT 20
  • flight delay dependence on the day of week:
    -SELECT DayOfWeek, count() AS c, avg(DepDelay > 60) AS delays
    -FROM ontime GROUP BY DayOfWeek ORDER BY DayOfWeek
  • cities of departure with the most frequent delays of 1 hour or longer:
    -SELECT OriginCityName, count() AS c, avg(DepDelay > 60) AS delays
    -FROM ontime
    -GROUP BY OriginCityName
    -HAVING c > 100000
    -ORDER BY delays DESC
    -LIMIT 20
  • flights of maximum duration:
    -SELECT OriginCityName, DestCityName, count(*) AS flights, avg(AirTime) AS duration
    -FROM ontime
    -GROUP BY OriginCityName, DestCityName
    -ORDER BY duration DESC
    -LIMIT 20
  • distribution of arrival-time delays, split by airline:
    -SELECT Carrier, count() AS c, round(quantileTDigest(0.99)(DepDelay), 2) AS q
    -FROM ontime GROUP BY Carrier ORDER BY q DESC
  • airlines that have stopped operating flights:
    -SELECT Carrier, min(Year), max(Year), count()
    -FROM ontime GROUP BY Carrier HAVING max(Year) < 2015 ORDER BY count() DESC
    -
    -
    -
    -
  • -
  • -
    most trending destination cities in 2015; -
    -
    -SELECT
    -    DestCityName,
    -    sum(Year = 2014) AS c2014,
    -    sum(Year = 2015) AS c2015,
    -    c2015 / c2014 AS diff
    -FROM ontime
    -WHERE Year IN (2014, 2015)
    -GROUP BY DestCityName
    -HAVING c2014 >  10000 AND c2015 >  1000 AND diff >  1
    -ORDER BY diff DESC
    -
    -
    -
    -
  • -
  • -
    destination cities with maximum popularity-season - dependency. -
    -
    -SELECT
    -    DestCityName,
    -    any(total),
    -    avg(abs(monthly * 12 - total) / total) AS avg_month_diff
    -FROM
    -(
    -    SELECT DestCityName, count() AS total
    -    FROM ontime GROUP BY DestCityName HAVING total > 100000
    -)
    -ALL INNER JOIN
    -(
    -    SELECT DestCityName, Month, count() AS monthly
    -    FROM ontime GROUP BY DestCityName, Month HAVING monthly > 10000
    -)
    -USING DestCityName
    -GROUP BY DestCityName
    -ORDER BY avg_month_diff DESC
    -LIMIT 20
    -
    -
    -
    -
  • -

ClickHouse deployment to a cluster


A ClickHouse cluster is a homogeneous cluster. Steps to set it up:

1. Install ClickHouse server on all machines of the cluster
2. Set up cluster configs in the configuration file
3. Create local tables on each instance
4. Create a Distributed table


A Distributed table is essentially a kind of "view" over the local tables of a ClickHouse cluster. A SELECT query against a Distributed table executes using the resources of all the cluster's shards. You may specify configs for multiple clusters and create multiple Distributed tables providing views into different clusters.

Config for a cluster of three shards, each shard storing its data on a single replica:
<remote_servers>
    <perftest_3shards_1replicas>
        <shard>
            <replica>
                <host>example-perftest01j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <replica>
                <host>example-perftest02j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <replica>
                <host>example-perftest03j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
    </perftest_3shards_1replicas>
</remote_servers>
Creating a local table:
CREATE TABLE ontime_local (...) ENGINE = MergeTree(FlightDate, (Year, FlightDate), 8192);
Creating a Distributed table that provides a view into the local tables of the cluster:
CREATE TABLE ontime_all AS ontime_local
    ENGINE = Distributed(perftest_3shards_1replicas, default, ontime_local, rand());

You can create a Distributed table on all machines in the cluster, which allows running distributed queries from any machine of the cluster. Besides a Distributed table, you can also use the remote table function.
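As a sketch of the latter (reusing the host names from the sample config above; the curly-brace address pattern is an assumption about how your hosts are named):

SELECT count() FROM remote('example-perftest0{1,2,3}j.yandex.ru', default, ontime_local)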


Let's run INSERT SELECT into the Distributed table to spread the data across multiple servers.

INSERT INTO ontime_all SELECT * FROM ontime;

Note that the approach given above is not suitable for sharding large tables.


As you would expect, heavy queries run N times faster when launched on three servers instead of one.


You may have noticed that the quantile calculations differ slightly. This happens because the t-digest algorithm implementation is non-deterministic: it depends on the order in which data is processed.
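If deterministic results matter more than speed, the carrier query above can use quantileExact instead, at the cost of more memory (a sketch, not from the original text):

SELECT Carrier, round(quantileExact(0.99)(DepDelay), 2) AS q
FROM ontime GROUP BY Carrier ORDER BY q DESC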


In this case, we used a cluster with three shards, each containing a single replica.


To provide resilience in a production environment, we recommend that each shard contain 2-3 replicas distributed across multiple data centers. Note that ClickHouse supports an unlimited number of replicas.

Config for a cluster with one shard containing three replicas:
<remote_servers>
    ...
    <perftest_1shards_3replicas>
        <shard>
            <replica>
                <host>example-perftest01j.yandex.ru</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>example-perftest02j.yandex.ru</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>example-perftest03j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
    </perftest_1shards_3replicas>
</remote_servers>

To enable replication, ZooKeeper is required. ClickHouse takes care of data consistency on all replicas and runs the restore procedure after a failure automatically. It is recommended to deploy the ZooKeeper cluster on separate servers.


ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it to all replicas from your application code. This approach is not recommended, because then ClickHouse cannot guarantee data consistency on all replicas; that remains the responsibility of your application.

Set the ZooKeeper locations in the configuration file:
<zookeeper>
    <node>
        <host>zoo01.yandex.ru</host>
        <port>2181</port>
    </node>
    <node>
        <host>zoo02.yandex.ru</host>
        <port>2181</port>
    </node>
    <node>
        <host>zoo03.yandex.ru</host>
        <port>2181</port>
    </node>
</zookeeper>

We also need to set macros identifying the shard and replica; they are used when creating tables:

<macros>
    <shard>01</shard>
    <replica>01</replica>
</macros>

If there are no replicas at the moment a replicated table is created, a new first replica is instantiated. If there are already live replicas, the new replica clones the data from the existing ones. You can either create all replicated tables first and then insert data into them, or create some replicas and add the others during or after data insertion.

CREATE TABLE ontime_replica (...)
ENGINE = ReplicatedMergeTree(
    '/clickhouse_perftest/tables/{shard}/ontime',
    '{replica}',
    FlightDate,
    (Year, FlightDate),
    8192);

Here we use the ReplicatedMergeTree table engine. In its parameters, we specify a ZooKeeper path containing the shard and replica identifiers.
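To see what the {shard} and {replica} placeholders expand to on a particular server, the configured macros can be inspected in the system.macros table (a quick check, assuming the macros config shown earlier):

SELECT * FROM system.macros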

INSERT INTO ontime_replica SELECT * FROM ontime;

Replication operates in multi-master mode: data can be loaded into any replica, and it is synced with the other instances automatically. Replication is asynchronous, so at any given moment not all replicas may contain the recently inserted data. At least one replica must be up to allow data insertion; the others will sync the data and repair consistency once they become active again. Note that this scheme allows for the possibility of losing just-inserted data.
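If losing freshly inserted data is unacceptable, ClickHouse provides the insert_quorum setting, which makes an INSERT succeed only after it has been acknowledged by the given number of replicas (a sketch; exact semantics may vary between versions):

SET insert_quorum = 2;
INSERT INTO ontime_replica SELECT * FROM ontime;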


The ClickHouse source code is published under the Apache 2.0 License. The software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
