Merge branch 'master' into patch-2

This commit is contained in:
mergify[bot] 2022-02-11 14:43:18 +00:00 committed by GitHub
commit 3d7bb71997
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
322 changed files with 7012 additions and 1931 deletions

View File

@ -142,6 +142,7 @@ Checks: '-*,
clang-analyzer-cplusplus.PlacementNewChecker,
clang-analyzer-cplusplus.SelfAssignment,
clang-analyzer-deadcode.DeadStores,
clang-analyzer-cplusplus.Move,
clang-analyzer-optin.cplusplus.VirtualCall,
clang-analyzer-security.insecureAPI.UncheckedReturn,
clang-analyzer-security.insecureAPI.bcmp,

3
.gitmodules vendored
View File

@ -259,3 +259,6 @@
[submodule "contrib/azure"]
path = contrib/azure
url = https://github.com/ClickHouse-Extras/azure-sdk-for-cpp.git
[submodule "contrib/minizip-ng"]
path = contrib/minizip-ng
url = https://github.com/zlib-ng/minizip-ng

View File

@ -78,6 +78,7 @@ add_contrib (croaring-cmake croaring)
add_contrib (zstd-cmake zstd)
add_contrib (zlib-ng-cmake zlib-ng)
add_contrib (bzip2-cmake bzip2)
add_contrib (minizip-ng-cmake minizip-ng)
add_contrib (snappy-cmake snappy)
add_contrib (rocksdb-cmake rocksdb)
add_contrib (thrift-cmake thrift)

View File

@ -56,19 +56,11 @@ list(APPEND SOURCES ${CASS_SRC_DIR}/atomic/atomic_std.hpp)
add_library(_curl_hostcheck OBJECT ${CASS_SRC_DIR}/third_party/curl/hostcheck.cpp)
add_library(_hdr_histogram OBJECT ${CASS_SRC_DIR}/third_party/hdr_histogram/hdr_histogram.cpp)
add_library(_http-parser OBJECT ${CASS_SRC_DIR}/third_party/http-parser/http_parser.c)
add_library(_minizip OBJECT
${CASS_SRC_DIR}/third_party/minizip/ioapi.c
${CASS_SRC_DIR}/third_party/minizip/zip.c
${CASS_SRC_DIR}/third_party/minizip/unzip.c)
target_link_libraries(_minizip ch_contrib::zlib)
target_compile_definitions(_minizip PRIVATE "-Dz_crc_t=unsigned long")
list(APPEND INCLUDE_DIRS
${CASS_SRC_DIR}/third_party/curl
${CASS_SRC_DIR}/third_party/hdr_histogram
${CASS_SRC_DIR}/third_party/http-parser
${CASS_SRC_DIR}/third_party/minizip
${CASS_SRC_DIR}/third_party/mt19937_64
${CASS_SRC_DIR}/third_party/rapidjson/rapidjson
${CASS_SRC_DIR}/third_party/sparsehash/src)
@ -123,10 +115,9 @@ add_library(_cassandra
${SOURCES}
$<TARGET_OBJECTS:_curl_hostcheck>
$<TARGET_OBJECTS:_hdr_histogram>
$<TARGET_OBJECTS:_http-parser>
$<TARGET_OBJECTS:_minizip>)
$<TARGET_OBJECTS:_http-parser>)
target_link_libraries(_cassandra ch_contrib::zlib)
target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip)
target_include_directories(_cassandra PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${INCLUDE_DIRS})
target_include_directories(_cassandra SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})
target_compile_definitions(_cassandra PRIVATE CASS_BUILDING)

1
contrib/minizip-ng vendored Submodule

@ -0,0 +1 @@
Subproject commit 6cffc951851620e0fac1993be75e4713c334de03

View File

@ -0,0 +1,168 @@
# Build minizip-ng (zip archive reading/writing) from ClickHouse contrib.
# Produces the ch_contrib::minizip target consumed e.g. by the cassandra contrib.
option(ENABLE_MINIZIP "Enable minizip-ng the zip manipulation library" ${ENABLE_LIBRARIES})
if (NOT ENABLE_MINIZIP)
    message (STATUS "minizip-ng disabled")
    return()
endif()

set(_MINIZIP_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/minizip-ng")

# Core source files; codec/crypto stream implementations are appended conditionally below.
set(MINIZIP_SRC
    ${_MINIZIP_SOURCE_DIR}/mz_crypt.c
    ${_MINIZIP_SOURCE_DIR}/mz_os.c
    ${_MINIZIP_SOURCE_DIR}/mz_strm.c
    ${_MINIZIP_SOURCE_DIR}/mz_strm_buf.c
    ${_MINIZIP_SOURCE_DIR}/mz_strm_mem.c
    ${_MINIZIP_SOURCE_DIR}/mz_strm_split.c
    ${_MINIZIP_SOURCE_DIR}/mz_zip.c
    ${_MINIZIP_SOURCE_DIR}/mz_zip_rw.c)

# Core header files.
set(MINIZIP_HDR
    ${_MINIZIP_SOURCE_DIR}/mz.h
    ${_MINIZIP_SOURCE_DIR}/mz_os.h
    ${_MINIZIP_SOURCE_DIR}/mz_crypt.h
    ${_MINIZIP_SOURCE_DIR}/mz_strm.h
    ${_MINIZIP_SOURCE_DIR}/mz_strm_buf.h
    ${_MINIZIP_SOURCE_DIR}/mz_strm_mem.h
    ${_MINIZIP_SOURCE_DIR}/mz_strm_split.h
    ${_MINIZIP_SOURCE_DIR}/mz_strm_os.h
    ${_MINIZIP_SOURCE_DIR}/mz_zip.h
    ${_MINIZIP_SOURCE_DIR}/mz_zip_rw.h)

set(MINIZIP_INC ${_MINIZIP_SOURCE_DIR})

set(MINIZIP_DEF)        # private compile definitions
set(MINIZIP_PUBLIC_DEF) # compile definitions propagated to consumers
set(MINIZIP_LIB)        # link dependencies

# zlib support (always present in ClickHouse contrib).
set(MZ_ZLIB ON)
if (MZ_ZLIB)
    # Use zlib from ClickHouse contrib.
    list(APPEND MINIZIP_LIB ch_contrib::zlib)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_strm_zlib.c)
    list(APPEND MINIZIP_HDR ${_MINIZIP_SOURCE_DIR}/mz_strm_zlib.h)
    # NOTE: target_compile_definitions() strips a leading "-D", so plain
    # names are used consistently here instead of mixing "-DFOO" and FOO.
    list(APPEND MINIZIP_DEF HAVE_ZLIB)
endif()

# bzip2 support (follows the global ENABLE_BZIP2 switch).
set(MZ_BZIP2 ${ENABLE_BZIP2})
if (MZ_BZIP2)
    # Use bzip2 from ClickHouse contrib.
    list(APPEND MINIZIP_LIB ch_contrib::bzip2)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_strm_bzip.c)
    list(APPEND MINIZIP_HDR ${_MINIZIP_SOURCE_DIR}/mz_strm_bzip.h)
    list(APPEND MINIZIP_DEF HAVE_BZIP2)
endif()

# liblzma (xz) support.
set(MZ_LZMA ON)
if (MZ_LZMA)
    # Use liblzma from ClickHouse contrib.
    list(APPEND MINIZIP_LIB ch_contrib::xz)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_strm_lzma.c)
    list(APPEND MINIZIP_HDR ${_MINIZIP_SOURCE_DIR}/mz_strm_lzma.h)
    list(APPEND MINIZIP_DEF HAVE_LZMA)
endif()

# zstd support.
set(MZ_ZSTD ON)
if (MZ_ZSTD)
    # Use zstd from ClickHouse contrib.
    list(APPEND MINIZIP_LIB ch_contrib::zstd)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_strm_zstd.c)
    list(APPEND MINIZIP_HDR ${_MINIZIP_SOURCE_DIR}/mz_strm_zstd.h)
    list(APPEND MINIZIP_DEF HAVE_ZSTD)
endif()

if (NOT MZ_ZLIB AND NOT MZ_ZSTD AND NOT MZ_BZIP2 AND NOT MZ_LZMA)
    message(STATUS "Compression not supported due to missing libraries")
    list(APPEND MINIZIP_DEF MZ_ZIP_NO_DECOMPRESSION)
    list(APPEND MINIZIP_DEF MZ_ZIP_NO_COMPRESSION)
endif()

# OpenSSL-backed crypto (follows the global ENABLE_SSL switch).
set(MZ_OPENSSL ${ENABLE_SSL})
if (MZ_OPENSSL)
    # Use openssl from ClickHouse contrib.
    list(APPEND MINIZIP_LIB OpenSSL::SSL OpenSSL::Crypto)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_crypt_openssl.c)
endif()

# WinZIP AES encryption (requires the OpenSSL crypto backend above).
set(MZ_WZAES ${ENABLE_SSL})
if (MZ_WZAES)
    list(APPEND MINIZIP_DEF HAVE_WZAES)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_strm_wzaes.c)
    list(APPEND MINIZIP_HDR ${_MINIZIP_SOURCE_DIR}/mz_strm_wzaes.h)
endif()

# Traditional PKWare encryption.
set(MZ_PKCRYPT ON)
if (MZ_PKCRYPT)
    list(APPEND MINIZIP_DEF HAVE_PKCRYPT)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_strm_pkcrypt.c)
    list(APPEND MINIZIP_HDR ${_MINIZIP_SOURCE_DIR}/mz_strm_pkcrypt.h)
endif()

# Unix-specific OS/stream backends.
if (UNIX)
    list(APPEND MINIZIP_SRC
        ${_MINIZIP_SOURCE_DIR}/mz_os_posix.c
        ${_MINIZIP_SOURCE_DIR}/mz_strm_os_posix.c)
endif()

# Compatibility layer exposing the original minizip API (zip.h / unzip.h shims
# live next to this file, so they must be anchored to CMAKE_CURRENT_SOURCE_DIR).
set(MZ_COMPAT ON)
if (MZ_COMPAT)
    list(APPEND MINIZIP_SRC ${_MINIZIP_SOURCE_DIR}/mz_compat.c)
    list(APPEND MINIZIP_HDR
        ${_MINIZIP_SOURCE_DIR}/mz_compat.h
        ${CMAKE_CURRENT_SOURCE_DIR}/zip.h
        ${CMAKE_CURRENT_SOURCE_DIR}/unzip.h)
    list(APPEND MINIZIP_INC "${CMAKE_CURRENT_SOURCE_DIR}")
    list(APPEND MINIZIP_PUBLIC_DEF MZ_COMPAT_VERSION=110)
endif()

add_library(_minizip ${MINIZIP_SRC} ${MINIZIP_HDR})
target_include_directories(_minizip PUBLIC ${MINIZIP_INC})
target_compile_definitions(_minizip PUBLIC ${MINIZIP_PUBLIC_DEF})
target_compile_definitions(_minizip PRIVATE ${MINIZIP_DEF})
target_link_libraries(_minizip PRIVATE ${MINIZIP_LIB})

add_library(ch_contrib::minizip ALIAS _minizip)

View File

@ -0,0 +1,13 @@
/* unzip.h -- Compatibility layer shim
   part of the minizip-ng project
   This program is distributed under the terms of the same license as zlib.
   See the accompanying LICENSE file for the full text of the license.
*/
/* Routes legacy `#include "unzip.h"` (original minizip API) to minizip-ng's
   compatibility layer, which declares the unzOpen/unzClose family. */
#ifndef MZ_COMPAT_UNZIP
#define MZ_COMPAT_UNZIP
#include "mz_compat.h"
#endif /* MZ_COMPAT_UNZIP */

View File

@ -0,0 +1,13 @@
/* zip.h -- Compatibility layer shim
   part of the minizip-ng project
   This program is distributed under the terms of the same license as zlib.
   See the accompanying LICENSE file for the full text of the license.
*/
/* Routes legacy `#include "zip.h"` (original minizip API) to minizip-ng's
   compatibility layer, which declares the zipOpen/zipClose family. */
#ifndef MZ_COMPAT_ZIP
#define MZ_COMPAT_ZIP
#include "mz_compat.h"
#endif /* MZ_COMPAT_ZIP */

View File

@ -127,11 +127,6 @@ endif()
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
add_definitions(-DOS_MACOSX)
if(CMAKE_SYSTEM_PROCESSOR MATCHES arm)
add_definitions(-DIOS_CROSS_COMPILE -DROCKSDB_LITE)
# no debug info for IOS, that will make our library big
add_definitions(-DNDEBUG)
endif()
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
add_definitions(-DOS_LINUX)
elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")

View File

@ -16,6 +16,8 @@ Restart=always
RestartSec=30
RuntimeDirectory=clickhouse-server
ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid
# Minus means that this file is optional.
EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE

View File

@ -886,3 +886,12 @@ S3 disk can be configured as `main` or `cold` storage:
```
In case of `cold` option a data can be moved to S3 if local disk free size will be smaller than `move_factor * disk_size` or by TTL move rule.
## Virtual Columns {#virtual-columns}
- `_part` — Name of a part.
- `_part_index` — Sequential index of the part in the query result.
- `_partition_id` — Name of a partition.
- `_part_uuid` — Unique part identifier (if enabled MergeTree setting `assign_part_uuids`).
- `_partition_value` — Values (a tuple) of a `partition by` expression.
- `_sample_factor` — Sample factor (from the query).

View File

@ -108,7 +108,13 @@ Examples of configuration for quorum with three nodes can be found in [integrati
ClickHouse Keeper is bundled into the ClickHouse server package, just add configuration of `<keeper_server>` and start ClickHouse server as always. If you want to run standalone ClickHouse Keeper you can start it in a similar way with:
```bash
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
clickhouse-keeper --config /etc/your_path_to_config/config.xml
```
If you don't have the symlink (`clickhouse-keeper`) you can create it or specify `keeper` as argument:
```bash
clickhouse keeper --config /etc/your_path_to_config/config.xml
```
## Four Letter Word Commands {#four-letter-word-commands}

View File

@ -27,7 +27,7 @@ To analyze the `trace_log` system table:
For security reasons, introspection functions are disabled by default.
- Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by the whole stack traces.
- Use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by the whole stack traces.
If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).

View File

@ -2,7 +2,7 @@
Contains stack traces of all server threads. Allows developers to introspect the server state.
To analyze stack frames, use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md).
To analyze stack frames, use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md).
Columns:

View File

@ -4,7 +4,7 @@ Contains stack traces collected by the sampling query profiler.
ClickHouse creates this table when the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also the [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) and [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.
To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions.
To analyze logs, use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` introspection functions.
Columns:

View File

@ -1,9 +1,9 @@
---
toc_priority: 40
toc_title: UInt8, UInt16, UInt32, UInt64, UInt256, Int8, Int16, Int32, Int64, Int128, Int256
toc_title: UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256
---
# UInt8, UInt16, UInt32, UInt64, UInt256, Int8, Int16, Int32, Int64, Int128, Int256 {#uint8-uint16-uint32-uint64-uint256-int8-int16-int32-int64-int128-int256}
# UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256
Fixed-length integers, with or without a sign.

View File

@ -120,7 +120,7 @@ The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32
Get the first available contact method for the customer from the contact list:
``` sql
SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook;
SELECT name, coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook;
```
``` text

View File

@ -113,6 +113,111 @@ trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so
/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97
```
## addressToLineWithInlines {#addresstolinewithinlines}
Similar to `addressToLine`, but returns an Array containing all inline functions; as a trade-off, it is much slower.
If you use official ClickHouse packages, you need to install the `clickhouse-common-static-dbg` package.
**Syntax**
``` sql
addressToLineWithInlines(address_of_binary_instruction)
```
**Arguments**
- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Address of instruction in a running process.
**Returned value**
- Array which first element is source code filename and the line number in this file delimited by colon. And from second element, inline functions' source code filename and line number and function name are listed.
- Array with single element which is name of a binary, if the function couldn't find the debug information.
- Empty array, if the address is not valid.
Type: [Array(String)](../../sql-reference/data-types/array.md).
**Example**
Enabling introspection functions:
``` sql
SET allow_introspection_functions=1;
```
Applying the function to address.
```sql
SELECT addressToLineWithInlines(531055181::UInt64);
```
``` text
┌─addressToLineWithInlines(CAST('531055181', 'UInt64'))────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ ['./src/Functions/addressToLineWithInlines.cpp:98','./build_normal_debug/./src/Functions/addressToLineWithInlines.cpp:176:DB::(anonymous namespace)::FunctionAddressToLineWithInlines::implCached(unsigned long) const'] │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
Applying the function to the whole stack trace:
``` sql
SELECT
ta, addressToLineWithInlines(arrayJoin(trace) as ta)
FROM system.trace_log
WHERE
query_id = '5e173544-2020-45de-b645-5deebe2aae54';
```
The [arrayJoin](../../sql-reference/functions/array-functions.md#array-functions-join) functions will split array to rows.
``` text
┌────────ta─┬─addressToLineWithInlines(arrayJoin(trace))───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ 365497529 │ ['./build_normal_debug/./contrib/libcxx/include/string_view:252'] │
│ 365593602 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:191'] │
│ 365593866 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365592528 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365591003 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:477'] │
│ 365590479 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:442'] │
│ 365590600 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:457'] │
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365607098 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365590571 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:451'] │
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365607098 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365590571 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:451'] │
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365607098 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365590571 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:451'] │
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
│ 365597289 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:807'] │
│ 365599840 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:1118'] │
│ 531058145 │ ['./build_normal_debug/./src/Functions/addressToLineWithInlines.cpp:152'] │
│ 531055181 │ ['./src/Functions/addressToLineWithInlines.cpp:98','./build_normal_debug/./src/Functions/addressToLineWithInlines.cpp:176:DB::(anonymous namespace)::FunctionAddressToLineWithInlines::implCached(unsigned long) const'] │
│ 422333613 │ ['./build_normal_debug/./src/Functions/IFunctionAdaptors.h:21'] │
│ 586866022 │ ['./build_normal_debug/./src/Functions/IFunction.cpp:216'] │
│ 586869053 │ ['./build_normal_debug/./src/Functions/IFunction.cpp:264'] │
│ 586873237 │ ['./build_normal_debug/./src/Functions/IFunction.cpp:334'] │
│ 597901620 │ ['./build_normal_debug/./src/Interpreters/ExpressionActions.cpp:601'] │
│ 597898534 │ ['./build_normal_debug/./src/Interpreters/ExpressionActions.cpp:718'] │
│ 630442912 │ ['./build_normal_debug/./src/Processors/Transforms/ExpressionTransform.cpp:23'] │
│ 546354050 │ ['./build_normal_debug/./src/Processors/ISimpleTransform.h:38'] │
│ 626026993 │ ['./build_normal_debug/./src/Processors/ISimpleTransform.cpp:89'] │
│ 626294022 │ ['./build_normal_debug/./src/Processors/Executors/ExecutionThreadContext.cpp:45'] │
│ 626293730 │ ['./build_normal_debug/./src/Processors/Executors/ExecutionThreadContext.cpp:63'] │
│ 626169525 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:213'] │
│ 626170308 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:178'] │
│ 626166348 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:329'] │
│ 626163461 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:84'] │
│ 626323536 │ ['./build_normal_debug/./src/Processors/Executors/PullingAsyncPipelineExecutor.cpp:85'] │
│ 626323277 │ ['./build_normal_debug/./src/Processors/Executors/PullingAsyncPipelineExecutor.cpp:112'] │
│ 626323133 │ ['./build_normal_debug/./contrib/libcxx/include/type_traits:3682'] │
│ 626323041 │ ['./build_normal_debug/./contrib/libcxx/include/tuple:1415'] │
└───────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## addressToSymbol {#addresstosymbol}
Converts virtual memory address inside ClickHouse server process to the symbol from ClickHouse object files.

View File

@ -22,7 +22,7 @@ tuple(x, y, …)
## tupleElement {#tupleelement}
A function that allows getting a column from a tuple.
N is the column index, starting from 1. N must be a constant. N must be a constant. N must be a strict postive integer no greater than the size of the tuple.
N is the column index, starting from 1. N must be a constant. N must be a strictly positive integer no greater than the size of the tuple.
There is no cost to execute the function.
The function implements the operator `x.N`.

View File

@ -172,6 +172,7 @@ Hierarchy of privileges:
- `SYSTEM FLUSH LOGS`
- [INTROSPECTION](#grant-introspection)
- `addressToLine`
- `addressToLineWithInlines`
- `addressToSymbol`
- `demangle`
- [SOURCES](#grant-sources)
@ -430,6 +431,7 @@ Allows using [introspection](../../operations/optimizing-performance/sampling-qu
- `INTROSPECTION`. Level: `GROUP`. Aliases: `INTROSPECTION FUNCTIONS`
- `addressToLine`. Level: `GLOBAL`
- `addressToLineWithInlines`. Level: `GLOBAL`
- `addressToSymbol`. Level: `GLOBAL`
- `demangle`. Level: `GLOBAL`

View File

@ -5,6 +5,6 @@ toc_title: Roadmap
# Roadmap {#roadmap}
The roadmap for the year 2021 is published for open discussion [here](https://github.com/ClickHouse/ClickHouse/issues/17623).
The roadmap for the year 2022 is published for open discussion [here](https://github.com/ClickHouse/ClickHouse/issues/32513).
{## [Original article](https://clickhouse.com/docs/en/roadmap/) ##}

View File

@ -26,4 +26,6 @@ toc_title: Introduction
- [Replicated](../../engines/database-engines/replicated.md)
- [SQLite](../../engines/database-engines/sqlite.md)
[来源文章](https://clickhouse.com/docs/en/database_engines/) <!--hide-->

View File

@ -24,6 +24,7 @@ ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cac
- `database` — 远程数据库名次
- `user` — PostgreSQL用户名称
- `password` — PostgreSQL用户密码
- `schema` - PostgreSQL 模式
- `use_table_cache` — 定义数据库表结构是否已缓存或不进行。可选的。默认值: `0`.
## 支持的数据类型 {#data_types-support}

View File

@ -31,6 +31,7 @@ CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_na
当创建数据库的新副本时,该副本会自己创建表。如果副本已经不可用很长一段时间,并且已经滞后于复制日志-它用ZooKeeper中的当前元数据检查它的本地元数据将带有数据的额外表移动到一个单独的非复制数据库(以免意外地删除任何多余的东西),创建缺失的表,如果表名已经被重命名,则更新表名。数据在`ReplicatedMergeTree`级别被复制,也就是说,如果表没有被复制,数据将不会被复制(数据库只负责元数据)。
允许[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md)查询,但不允许复制。数据库引擎将只向当前副本添加/获取/删除分区/部件。但是如果表本身使用了Replicated表引擎那么数据将在使用`ATTACH`后被复制。
## 使用示例 {#usage-example}
创建三台主机的集群:

View File

@ -1 +0,0 @@
../../../en/engines/database-engines/sqlite.md

View File

@ -0,0 +1,80 @@
---
toc_priority: 32
toc_title: SQLite
---
# SQLite {#sqlite}
允许连接到[SQLite](https://www.sqlite.org/index.html)数据库并支持ClickHouse和SQLite交换数据 执行 `INSERT``SELECT` 查询。
## 创建一个数据库 {#creating-a-database}
``` sql
CREATE DATABASE sqlite_database
ENGINE = SQLite('db_path')
```
**引擎参数**
- `db_path` — SQLite 数据库文件的路径.
## 数据类型的支持 {#data_types-support}
| SQLite | ClickHouse |
|---------------|---------------------------------------------------------|
| INTEGER | [Int32](../../sql-reference/data-types/int-uint.md) |
| REAL | [Float32](../../sql-reference/data-types/float.md) |
| TEXT | [String](../../sql-reference/data-types/string.md) |
| BLOB | [String](../../sql-reference/data-types/string.md) |
## 技术细节和建议 {#specifics-and-recommendations}
SQLite将整个数据库(定义、表、索引和数据本身)存储为主机上的单个跨平台文件。在写入过程中SQLite会锁定整个数据库文件因此写入操作是顺序执行的。读操作可以是多任务的。
SQLite不需要服务管理(如启动脚本)或基于`GRANT`和密码的访问控制。访问控制是通过授予数据库文件本身的文件系统权限来处理的。
## 使用示例 {#usage-example}
数据库在ClickHouse连接到SQLite:
``` sql
CREATE DATABASE sqlite_db ENGINE = SQLite('sqlite.db');
SHOW TABLES FROM sqlite_db;
```
``` text
┌──name───┐
│ table1 │
│ table2 │
└─────────┘
```
展示数据表中的内容:
``` sql
SELECT * FROM sqlite_db.table1;
```
``` text
┌─col1──┬─col2─┐
│ line1 │ 1 │
│ line2 │ 2 │
│ line3 │ 3 │
└───────┴──────┘
```
从ClickHouse表插入数据到SQLite表:
``` sql
CREATE TABLE clickhouse_table(`col1` String,`col2` Int16) ENGINE = MergeTree() ORDER BY col2;
INSERT INTO clickhouse_table VALUES ('text',10);
INSERT INTO sqlite_db.table1 SELECT * FROM clickhouse_table;
SELECT * FROM sqlite_db.table1;
```
``` text
┌─col1──┬─col2─┐
│ line1 │ 1 │
│ line2 │ 2 │
│ line3 │ 3 │
│ text │ 10 │
└───────┴──────┘
```

View File

@ -0,0 +1,416 @@
---
toc_priority: 4
toc_title: Hive
---
# Hive {#hive}
Hive引擎允许对HDFS Hive表执行 `SELECT` 查询。目前它支持如下输入格式:
-文本:只支持简单的标量列类型,除了 `Binary`
- ORC:支持简单的标量列类型,除了`char`; 只支持 `array` 这样的复杂类型
- Parquet:支持所有简单标量列类型;只支持 `array` 这样的复杂类型
## 创建表 {#creating-a-table}
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [ALIAS expr1],
name2 [type2] [ALIAS expr2],
...
) ENGINE = Hive('thrift://host:port', 'database', 'table');
PARTITION BY expr
```
查看[CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query)查询的详细描述。
表的结构可以与原来的Hive表结构有所不同:
- 列名应该与原来的Hive表相同但你可以使用这些列中的一些并以任何顺序你也可以使用一些从其他列计算的别名列。
- 列类型与原Hive表的列类型保持一致。
- “Partition by expression”应与原Hive表保持一致“Partition by expression”中的列应在表结构中。
**引擎参数**
- `thrift://host:port` — Hive Metastore 地址
- `database` — 远程数据库名.
- `table` — 远程数据表名.
## 使用示例 {#usage-example}
### 如何使用HDFS文件系统的本地缓存
我们强烈建议您为远程文件系统启用本地缓存。基准测试显示,如果使用缓存,它的速度会快两倍。
在使用缓存之前,请将其添加到 `config.xml`
``` xml
<local_cache_for_remote_fs>
<enable>true</enable>
<root_dir>local_cache</root_dir>
<limit_size>559096952</limit_size>
<bytes_read_before_flush>1048576</bytes_read_before_flush>
</local_cache_for_remote_fs>
```
- enable: 开启后ClickHouse将为HDFS (远程文件系统)维护本地缓存。
- root_dir: 必需的。用于存储远程文件系统的本地缓存文件的根目录。
- limit_size: 必需的。本地缓存文件的最大大小(单位为字节)。
- bytes_read_before_flush: 从远程文件系统下载文件时刷新到本地文件系统前的控制字节数。缺省值为1MB。
当ClickHouse为远程文件系统启用了本地缓存时用户仍然可以选择不使用缓存并在查询中设置`use_local_cache_for_remote_fs = 0 `, `use_local_cache_for_remote_fs` 默认为 `false`
### 查询 ORC 输入格式的Hive 表
#### 在 Hive 中建表
``` text
hive > CREATE TABLE `test`.`test_orc`(
`f_tinyint` tinyint,
`f_smallint` smallint,
`f_int` int,
`f_integer` int,
`f_bigint` bigint,
`f_float` float,
`f_double` double,
`f_decimal` decimal(10,0),
`f_timestamp` timestamp,
`f_date` date,
`f_string` string,
`f_varchar` varchar(100),
`f_bool` boolean,
`f_binary` binary,
`f_array_int` array<int>,
`f_array_string` array<string>,
`f_array_float` array<float>,
`f_array_array_int` array<array<int>>,
`f_array_array_string` array<array<string>>,
`f_array_array_float` array<array<float>>)
PARTITIONED BY (
`day` string)
ROW FORMAT SERDE
'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
STORED AS INPUTFORMAT
'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
LOCATION
'hdfs://testcluster/data/hive/test.db/test_orc'
OK
Time taken: 0.51 seconds
hive > insert into test.test_orc partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44)));
OK
Time taken: 36.025 seconds
hive > select * from test.test_orc;
OK
1 2 3 4 5 6.11 7.22 8 2021-11-05 12:38:16.314 2021-11-05 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18
Time taken: 0.295 seconds, Fetched: 1 row(s)
```
#### 在 ClickHouse 中建表
ClickHouse中的表从上面创建的Hive表中获取数据:
``` sql
CREATE TABLE test.test_orc
(
`f_tinyint` Int8,
`f_smallint` Int16,
`f_int` Int32,
`f_integer` Int32,
`f_bigint` Int64,
`f_float` Float32,
`f_double` Float64,
`f_decimal` Float64,
`f_timestamp` DateTime,
`f_date` Date,
`f_string` String,
`f_varchar` String,
`f_bool` Bool,
`f_binary` String,
`f_array_int` Array(Int32),
`f_array_string` Array(String),
`f_array_float` Array(Float32),
`f_array_array_int` Array(Array(Int32)),
`f_array_array_string` Array(Array(String)),
`f_array_array_float` Array(Array(Float32)),
`day` String
)
ENGINE = Hive('thrift://202.168.117.26:9083', 'test', 'test_orc')
PARTITION BY day
```
``` sql
SELECT * FROM test.test_orc settings input_format_orc_allow_missing_columns = 1\G
```
``` text
SELECT *
FROM test.test_orc
SETTINGS input_format_orc_allow_missing_columns = 1
Query id: c3eaffdc-78ab-43cd-96a4-4acc5b480658
Row 1:
──────
f_tinyint: 1
f_smallint: 2
f_int: 3
f_integer: 4
f_bigint: 5
f_float: 6.11
f_double: 7.22
f_decimal: 8
f_timestamp: 2021-12-04 04:00:44
f_date: 2021-12-03
f_string: hello world
f_varchar: hello world
f_bool: true
f_binary: hello world
f_array_int: [1,2,3]
f_array_string: ['hello world','hello world']
f_array_float: [1.1,1.2]
f_array_array_int: [[1,2],[3,4]]
f_array_array_string: [['a','b'],['c','d']]
f_array_array_float: [[1.11,2.22],[3.33,4.44]]
day: 2021-09-18
1 rows in set. Elapsed: 0.078 sec.
```
### 查询 Parquet 输入格式的Hive 表
#### 在 Hive 中建表
``` text
hive >
CREATE TABLE `test`.`test_parquet`(
`f_tinyint` tinyint,
`f_smallint` smallint,
`f_int` int,
`f_integer` int,
`f_bigint` bigint,
`f_float` float,
`f_double` double,
`f_decimal` decimal(10,0),
`f_timestamp` timestamp,
`f_date` date,
`f_string` string,
`f_varchar` varchar(100),
`f_char` char(100),
`f_bool` boolean,
`f_binary` binary,
`f_array_int` array<int>,
`f_array_string` array<string>,
`f_array_float` array<float>,
`f_array_array_int` array<array<int>>,
`f_array_array_string` array<array<string>>,
`f_array_array_float` array<array<float>>)
PARTITIONED BY (
`day` string)
ROW FORMAT SERDE
'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
STORED AS INPUTFORMAT
'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
LOCATION
'hdfs://testcluster/data/hive/test.db/test_parquet'
OK
Time taken: 0.51 seconds
hive > insert into test.test_parquet partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44)));
OK
Time taken: 36.025 seconds
hive > select * from test.test_parquet;
OK
1 2 3 4 5 6.11 7.22 8 2021-12-14 17:54:56.743 2021-12-14 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18
Time taken: 0.766 seconds, Fetched: 1 row(s)
```
#### 在 ClickHouse 中建表
ClickHouse 中的表, 从上面创建的Hive表中获取数据:
``` sql
CREATE TABLE test.test_parquet
(
`f_tinyint` Int8,
`f_smallint` Int16,
`f_int` Int32,
`f_integer` Int32,
`f_bigint` Int64,
`f_float` Float32,
`f_double` Float64,
`f_decimal` Float64,
`f_timestamp` DateTime,
`f_date` Date,
`f_string` String,
`f_varchar` String,
`f_char` String,
`f_bool` Bool,
`f_binary` String,
`f_array_int` Array(Int32),
`f_array_string` Array(String),
`f_array_float` Array(Float32),
`f_array_array_int` Array(Array(Int32)),
`f_array_array_string` Array(Array(String)),
`f_array_array_float` Array(Array(Float32)),
`day` String
)
ENGINE = Hive('thrift://localhost:9083', 'test', 'test_parquet')
PARTITION BY day
```
``` sql
SELECT * FROM test.test_parquet settings input_format_parquet_allow_missing_columns = 1\G
```
``` text
SELECT *
FROM test_parquet
SETTINGS input_format_parquet_allow_missing_columns = 1
Query id: 4e35cf02-c7b2-430d-9b81-16f438e5fca9
Row 1:
──────
f_tinyint: 1
f_smallint: 2
f_int: 3
f_integer: 4
f_bigint: 5
f_float: 6.11
f_double: 7.22
f_decimal: 8
f_timestamp: 2021-12-14 17:54:56
f_date: 2021-12-14
f_string: hello world
f_varchar: hello world
f_char: hello world
f_bool: true
f_binary: hello world
f_array_int: [1,2,3]
f_array_string: ['hello world','hello world']
f_array_float: [1.1,1.2]
f_array_array_int: [[1,2],[3,4]]
f_array_array_string: [['a','b'],['c','d']]
f_array_array_float: [[1.11,2.22],[3.33,4.44]]
day: 2021-09-18
1 rows in set. Elapsed: 0.357 sec.
```
### 查询文本输入格式的Hive表
#### 在Hive 中建表
``` text
hive >
CREATE TABLE `test`.`test_text`(
`f_tinyint` tinyint,
`f_smallint` smallint,
`f_int` int,
`f_integer` int,
`f_bigint` bigint,
`f_float` float,
`f_double` double,
`f_decimal` decimal(10,0),
`f_timestamp` timestamp,
`f_date` date,
`f_string` string,
`f_varchar` varchar(100),
`f_char` char(100),
`f_bool` boolean,
`f_binary` binary,
`f_array_int` array<int>,
`f_array_string` array<string>,
`f_array_float` array<float>,
`f_array_array_int` array<array<int>>,
`f_array_array_string` array<array<string>>,
`f_array_array_float` array<array<float>>)
PARTITIONED BY (
`day` string)
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
STORED AS INPUTFORMAT
'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
LOCATION
'hdfs://testcluster/data/hive/test.db/test_text'
Time taken: 0.1 seconds, Fetched: 34 row(s)
hive > insert into test.test_text partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44)));
OK
Time taken: 36.025 seconds
hive > select * from test.test_text;
OK
1 2 3 4 5 6.11 7.22 8 2021-12-14 18:11:17.239 2021-12-14 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18
Time taken: 0.624 seconds, Fetched: 1 row(s)
```
#### 在 ClickHouse 中建表
ClickHouse中的表 从上面创建的Hive表中获取数据:
``` sql
CREATE TABLE test.test_text
(
`f_tinyint` Int8,
`f_smallint` Int16,
`f_int` Int32,
`f_integer` Int32,
`f_bigint` Int64,
`f_float` Float32,
`f_double` Float64,
`f_decimal` Float64,
`f_timestamp` DateTime,
`f_date` Date,
`f_string` String,
`f_varchar` String,
`f_char` String,
`f_bool` Bool,
`day` String
)
ENGINE = Hive('thrift://localhost:9083', 'test', 'test_text')
PARTITION BY day
```
``` sql
SELECT * FROM test.test_text settings input_format_skip_unknown_fields = 1, input_format_with_names_use_header = 1, date_time_input_format = 'best_effort'\G
```
``` text
SELECT *
FROM test.test_text
SETTINGS input_format_skip_unknown_fields = 1, input_format_with_names_use_header = 1, date_time_input_format = 'best_effort'
Query id: 55b79d35-56de-45b9-8be6-57282fbf1f44
Row 1:
──────
f_tinyint: 1
f_smallint: 2
f_int: 3
f_integer: 4
f_bigint: 5
f_float: 6.11
f_double: 7.22
f_decimal: 8
f_timestamp: 2021-12-14 18:11:17
f_date: 2021-12-14
f_string: hello world
f_varchar: hello world
f_char: hello world
f_bool: true
day: 2021-09-18
```

View File

@ -19,3 +19,5 @@ ClickHouse 提供了多种方式来与外部系统集成,包括表引擎。像
- [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md)
- [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md)
- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md)
- [SQLite](../../../engines/table-engines/integrations/sqlite.md)
- [Hive](../../../engines/table-engines/integrations/hive.md)

View File

@ -1,67 +1,62 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---
# 系统。query_thread_log {#system_tables-query_thread_log}
包含有关执行查询的线程的信息,例如,线程名称、线程开始时间、查询处理的持续时间。
开始记录:
开启日志功能:
1. 在配置参数 [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log)
2. 设置 [log_query_threads](../../operations/settings/settings.md#settings-log-query-threads) 1。
1. 在配置参数 [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) 部分
2. 设置 [log_query_threads](../../operations/settings/settings.md#settings-log-query-threads) 1。
数据的冲洗周期设置在 `flush_interval_milliseconds` 的参数 [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) 服务器设置部分。 要强制冲洗,请使用 [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) 查询。
数据从缓存写入数据表周期时间参数 `flush_interval_milliseconds` 位于 [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) 服务器设置部分。如果需要强制从缓存写入数据表,请使用 [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) 查询请求
ClickHouse不会自动从表中删除数据。 看 [导言](../../operations/system-tables/index.md#system-tables-introduction) 欲了解更多详情
ClickHouse不会自动从表中删除数据。 欲了解更多详情,请参照 [介绍](../../operations/system-tables/index.md#system-tables-introduction)。
列:
- `event_date` ([日期](../../sql-reference/data-types/date.md)) — The date when the thread has finished execution of the query.
- `event_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query.
- `query_start_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of read rows.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of read bytes.
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` 查询,写入的行数。 对于其他查询,列值为0。
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` 查询时,写入的字节数。 对于其他查询,列值为0。
- `memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The difference between the amount of allocated and freed memory in context of this thread.
- `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The maximum difference between the amount of allocated and freed memory in context of this thread.
- `thread_name` ([字符串](../../sql-reference/data-types/string.md)) — Name of the thread.
- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Internal thread ID.
- `thread_id` ([Int32](../../sql-reference/data-types/int-uint.md)) — thread ID.
- `master_thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — OS initial ID of initial thread.
- `query` ([字符串](../../sql-reference/data-types/string.md)) — Query string.
- `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Query type. Possible values:
- 1 — Query was initiated by the client.
- 0 — Query was initiated by another query for distributed query execution.
- `user` ([字符串](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([字符串](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the query.
- `initial_user` ([字符串](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([字符串](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the parent query.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Interface that the query was initiated from. Possible values:
- `event_date` ([日期](../../sql-reference/data-types/date.md)) — 该查询线程执行完成的日期。
- `event_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — 该查询线程执行完成的时间。
- `query_start_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — 查询的开始时间。
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 查询执行持续的时间。
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 读取的行数。
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 读取的字节数。
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 对于 `INSERT` 查询,写入的行数。 对于其他查询为0。
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 对于 `INSERT` 查询,写入的字节数。 对于其他查询,为0。
- `memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — 在线程上下文,分配的内存和空闲内存之差。
- `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — 在线程上下文,分配的内存和空闲内存之差的最大值。
- `thread_name` ([字符串](../../sql-reference/data-types/string.md)) — 线程名。
- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — 内部线程ID。
- `thread_id` ([Int32](../../sql-reference/data-types/int-uint.md)) — 线程ID。
- `master_thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — OS初始线程的初始ID。
- `query` ([字符串](../../sql-reference/data-types/string.md)) — 查询语句。
- `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 查询类型,可能的值:
- 1 — 由用户发起的查询。
- 0 — 由其他查询发起的分布式查询。
- `user` ([字符串](../../sql-reference/data-types/string.md)) — 发起查询的用户名。
- `query_id` ([字符串](../../sql-reference/data-types/string.md)) — 查询的ID。
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的IP地址。
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的端口。
- `initial_user` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的用户名(对于分布式查询)。
- `initial_query_id` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的ID对于分布式查询
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起该查询的父查询IP地址。
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起该查询的父查询端口。
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的接口,可能的值:
- 1 — TCP.
- 2 — HTTP.
- `os_user` ([字符串](../../sql-reference/data-types/string.md)) — OS's username who runs [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md).
- `client_hostname` ([字符串](../../sql-reference/data-types/string.md)) — Hostname of the client machine where the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或者运行另一个TCP客户端
- `client_name` ([字符串](../../sql-reference/data-types/string.md)) — The [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端名称。
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Revision of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端。
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Major version of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端。
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Minor version of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端。
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Patch component of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端版本。
- `http_method` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — HTTP method that initiated the query. Possible values:
- 0 — The query was launched from the TCP interface.
- `os_user` ([字符串](../../sql-reference/data-types/string.md)) — 使用 [clickhouse-client](../../interfaces/cli.md) 的系统用户名。
- `client_hostname` ([字符串](../../sql-reference/data-types/string.md)) — 运行 [clickhouse-client](../../interfaces/cli.md) 或另一个TCP客户端的主机名
- `client_name` ([字符串](../../sql-reference/data-types/string.md)) — [clickhouse-client](../../interfaces/cli.md) 或另一个TCP客户端名称。
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或另一个TCP客户端的修订号
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或另一个TCP客户端的主版本号
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或另一个TCP客户端的次版本号
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或另一个TCP客户端的补丁版本
- `http_method` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的HTTP方法可能的值
- 0 — 查询通过 TCP 接口发起。
- 1 — `GET` 方法被使用。
- 2 — `POST` 方法被使用。
- `http_user_agent` ([字符串](../../sql-reference/data-types/string.md)) — The `UserAgent` http请求中传递的标头。
- `quota_key` ([字符串](../../sql-reference/data-types/string.md)) — The “quota key” 在指定 [配额](../../operations/quotas.md) 设置(见 `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([数组(字符串, UInt64)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [系统。活动](#system_tables-events).
- `http_user_agent` ([字符串](../../sql-reference/data-types/string.md)) — HTTP 请求中传递的 `UserAgent` 标头。
- `quota_key` ([字符串](../../sql-reference/data-types/string.md)) — 在 [配额](../../operations/quotas.md) 设置中指定的 “quota key”(参见 `keyed`)。
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse 修订版本号。
- `ProfileEvents` ([数组(字符串, UInt64)](../../sql-reference/data-types/array.md)) — 对于该线程的多个指标计数器。这一项可以参考 [system.events](#system_tables-events).
**示例**
@ -113,4 +108,5 @@ ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'Compr
**另请参阅**
- [系统。query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` 系统表,其中包含有关查询执行的公共信息。
- [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — `query_log` 系统表描述,其中包含有关查询执行的公共信息。
- [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) — 这个表包含在查询线程中使用的各个视图的信息。

View File

@ -1,17 +1,41 @@
# UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64}
---
toc_priority: 40
toc_title: UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256
---
# UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256
固定长度的整型,包括有符号整型或无符号整型。
创建表时,可以为整数设置类型参数 (例如. `TINYINT(8)`, `SMALLINT(16)`, `INT(32)`, `BIGINT(64)`), 但 ClickHouse 会忽略它们.
## 整型范围 {#int-ranges}
- Int8-\[-128:127\]
- Int16-\[-32768:32767\]
- Int32-\[-2147483648:2147483647\]
- Int64-\[-9223372036854775808:9223372036854775807\]
- `Int8` — \[-128 : 127\]
- `Int16` — \[-32768 : 32767\]
- `Int32` — \[-2147483648 : 2147483647\]
- `Int64` — \[-9223372036854775808 : 9223372036854775807\]
- `Int128` — \[-170141183460469231731687303715884105728 : 170141183460469231731687303715884105727\]
- `Int256` — \[-57896044618658097711785492504343953926634992332820282019728792003956564819968 : 57896044618658097711785492504343953926634992332820282019728792003956564819967\]
别名:
- `Int8``TINYINT`, `BOOL`, `BOOLEAN`, `INT1`.
- `Int16``SMALLINT`, `INT2`.
- `Int32``INT`, `INT4`, `INTEGER`.
- `Int64``BIGINT`.
## 无符号整型范围 {#uint-ranges}
- UInt8-\[0:255\]
- UInt16-\[0:65535\]
- UInt32-\[0:4294967295\]
- UInt64-\[0:18446744073709551615\]
- `UInt8` — \[0 : 255\]
- `UInt16` — \[0 : 65535\]
- `UInt32` — \[0 : 4294967295\]
- `UInt64` — \[0 : 18446744073709551615\]
- `UInt128` — \[0 : 340282366920938463463374607431768211455\]
- `UInt256` — \[0 : 115792089237316195423570985008687907853269984665640564039457584007913129639935\]
[源文档](https://clickhouse.com/docs/en/data_types/int_uint/) <!--hide-->

View File

@ -5,6 +5,6 @@ toc_title: Roadmap
# Roadmap {#roadmap}
`2021年Roadmap`已公布供公开讨论查看[这里](https://github.com/ClickHouse/ClickHouse/issues/17623).
`2022年Roadmap`已公布供公开讨论查看 [这里](https://github.com/ClickHouse/ClickHouse/issues/32513).
{## [源文章](https://clickhouse.com/docs/en/roadmap/) ##}

View File

@ -481,48 +481,76 @@ catch (...)
void Client::connect()
{
connection_parameters = ConnectionParameters(config());
if (is_interactive)
std::cout << "Connecting to "
<< (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at "
: "")
<< connection_parameters.host << ":" << connection_parameters.port
<< (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl;
UInt16 default_port = ConnectionParameters::getPortFromConfig(config());
connection_parameters = ConnectionParameters(config(), hosts_ports[0].host,
hosts_ports[0].port.value_or(default_port));
String server_name;
UInt64 server_version_major = 0;
UInt64 server_version_minor = 0;
UInt64 server_version_patch = 0;
try
for (size_t attempted_address_index = 0; attempted_address_index < hosts_ports.size(); ++attempted_address_index)
{
connection = Connection::createConnection(connection_parameters, global_context);
connection_parameters.host = hosts_ports[attempted_address_index].host;
connection_parameters.port = hosts_ports[attempted_address_index].port.value_or(default_port);
if (max_client_network_bandwidth)
if (is_interactive)
std::cout << "Connecting to "
<< (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at "
: "")
<< connection_parameters.host << ":" << connection_parameters.port
<< (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl;
try
{
ThrottlerPtr throttler = std::make_shared<Throttler>(max_client_network_bandwidth, 0, "");
connection->setThrottler(throttler);
}
connection = Connection::createConnection(connection_parameters, global_context);
connection->getServerVersion(
connection_parameters.timeouts, server_name, server_version_major, server_version_minor, server_version_patch, server_revision);
}
catch (const Exception & e)
{
/// It is typical when users install ClickHouse, type some password and instantly forget it.
if ((connection_parameters.user.empty() || connection_parameters.user == "default")
&& e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
if (max_client_network_bandwidth)
{
ThrottlerPtr throttler = std::make_shared<Throttler>(max_client_network_bandwidth, 0, "");
connection->setThrottler(throttler);
}
connection->getServerVersion(
connection_parameters.timeouts, server_name, server_version_major, server_version_minor, server_version_patch, server_revision);
config().setString("host", connection_parameters.host);
config().setInt("port", connection_parameters.port);
break;
}
catch (const Exception & e)
{
std::cerr << std::endl
<< "If you have installed ClickHouse and forgot password you can reset it in the configuration file." << std::endl
<< "The password for default user is typically located at /etc/clickhouse-server/users.d/default-password.xml" << std::endl
<< "and deleting this file will reset the password." << std::endl
<< "See also /etc/clickhouse-server/users.xml on the server where ClickHouse is installed." << std::endl
<< std::endl;
}
/// It is typical when users install ClickHouse, type some password and instantly forget it.
/// This problem can't be fixed with reconnection so it is not attempted
if ((connection_parameters.user.empty() || connection_parameters.user == "default")
&& e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
{
std::cerr << std::endl
<< "If you have installed ClickHouse and forgot password you can reset it in the configuration file." << std::endl
<< "The password for default user is typically located at /etc/clickhouse-server/users.d/default-password.xml" << std::endl
<< "and deleting this file will reset the password." << std::endl
<< "See also /etc/clickhouse-server/users.xml on the server where ClickHouse is installed." << std::endl
<< std::endl;
throw;
}
else
{
if (attempted_address_index == hosts_ports.size() - 1)
throw;
throw;
if (is_interactive)
{
std::cerr << "Connection attempt to database at "
<< connection_parameters.host << ":" << connection_parameters.port
<< " resulted in failure"
<< std::endl
<< getExceptionMessage(e, false)
<< std::endl
<< "Attempting connection to the next provided address"
<< std::endl;
}
}
}
}
server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_version_patch);
@ -966,8 +994,11 @@ void Client::addOptions(OptionsDescription & options_description)
/// Main commandline options related to client functionality and all parameters from Settings.
options_description.main_description->add_options()
("config,c", po::value<std::string>(), "config-file path (another shorthand)")
("host,h", po::value<std::string>()->default_value("localhost"), "server host")
("port", po::value<int>()->default_value(9000), "server port")
("host,h", po::value<std::vector<HostPort>>()->multitoken()->default_value({{"localhost"}}, "localhost"),
"list of server hosts with optionally assigned port to connect. List elements are separated by a space."
"Every list element looks like '<host>[:<port>]'. If port isn't assigned, connection is made by port from '--port' param"
"Example of usage: '-h host1:1 host2 host3:3'")
("port", po::value<int>()->default_value(9000), "server port, which is default port for every host from '--host' param")
("secure,s", "Use TLS connection")
("user,u", po::value<std::string>()->default_value("default"), "user")
/** If "--password [value]" is used but the value is omitted, the bad argument exception will be thrown.
@ -1074,8 +1105,8 @@ void Client::processOptions(const OptionsDescription & options_description,
if (options.count("config"))
config().setString("config-file", options["config"].as<std::string>());
if (options.count("host") && !options["host"].defaulted())
config().setString("host", options["host"].as<std::string>());
if (options.count("host"))
hosts_ports = options["host"].as<std::vector<HostPort>>();
if (options.count("interleave-queries-file"))
interleave_queries_files = options["interleave-queries-file"].as<std::vector<std::string>>();
if (options.count("port") && !options["port"].defaulted())

View File

@ -57,8 +57,16 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
("seed", po::value<std::string>(), "seed (arbitrary string) that determines the result of obfuscation")
;
Settings cmd_settings;
for (const auto & field : cmd_settings.all())
{
if (field.getName() == "max_parser_depth" || field.getName() == "max_query_size")
cmd_settings.addProgramOption(desc, field);
}
boost::program_options::variables_map options;
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
po::notify(options);
if (options.count("help"))
{
@ -149,7 +157,8 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
ParserQuery parser(end);
do
{
ASTPtr res = parseQueryAndMovePosition(parser, pos, end, "query", multiple, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
ASTPtr res = parseQueryAndMovePosition(
parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth);
/// For insert query with data(INSERT INTO ... VALUES ...), will lead to format fail,
/// should throw exception early and make exception message more readable.
if (const auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
@ -222,6 +231,5 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
std::cerr << getCurrentExceptionMessage(true) << '\n';
return getCurrentExceptionCode();
}
return 0;
}

View File

@ -315,7 +315,7 @@ void LocalServer::cleanup()
std::string LocalServer::getInitialCreateTableQuery()
{
if (!config().has("table-structure") && !config().has("table-file"))
if (!config().has("table-structure") && !config().has("table-file") && !config().has("table-data-format"))
return {};
auto table_name = backQuoteIfNeed(config().getString("table-name", "table"));

View File

@ -217,13 +217,12 @@
<!-- The following file is used only if ssl_require_client_auth=1 -->
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
<!-- Default compression algorithm (applied if client doesn't specify another algorithm, see result_compression in QueryInfo).
<!-- Default transport compression type (can be overridden by client, see the transport_compression_type field in QueryInfo).
Supported algorithms: none, deflate, gzip, stream_gzip -->
<compression>deflate</compression>
<transport_compression_type>none</transport_compression_type>
<!-- Default compression level (applied if client doesn't specify another level, see result_compression in QueryInfo).
Supported levels: none, low, medium, high -->
<compression_level>medium</compression_level>
<!-- Default transport compression level. Supported levels: 0..3 -->
<transport_compression_level>0</transport_compression_level>
<!-- Send/receive message size limits in bytes. -1 means unlimited -->
<max_send_message_size>-1</max_send_message_size>

View File

@ -1,8 +1,8 @@
#include <Access/AccessRights.h>
#include <base/logger_useful.h>
#include <base/sort.h>
#include <boost/container/small_vector.hpp>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/sort.hpp>
#include <unordered_map>
namespace DB
@ -101,7 +101,7 @@ namespace
AccessRightsElements getResult() const
{
ProtoElements sorted = *this;
boost::range::sort(sorted);
::sort(sorted.begin(), sorted.end());
AccessRightsElements res;
res.reserve(sorted.size());

View File

@ -86,7 +86,7 @@ enum class AccessType
M(CREATE_DICTIONARY, "", DICTIONARY, CREATE) /* allows to execute {CREATE|ATTACH} DICTIONARY */\
M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables;
implicitly enabled by the grant CREATE_TABLE on any table */ \
M(CREATE_FUNCTION, "", DATABASE, CREATE) /* allows to execute CREATE FUNCTION */ \
M(CREATE_FUNCTION, "", GLOBAL, CREATE) /* allows to execute CREATE FUNCTION */ \
M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \
\
M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\
@ -94,7 +94,7 @@ enum class AccessType
M(DROP_VIEW, "", VIEW, DROP) /* allows to execute {DROP|DETACH} TABLE for views;
implicitly enabled by the grant DROP_TABLE */\
M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\
M(DROP_FUNCTION, "", DATABASE, DROP) /* allows to execute DROP FUNCTION */\
M(DROP_FUNCTION, "", GLOBAL, DROP) /* allows to execute DROP FUNCTION */\
M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\
\
M(TRUNCATE, "TRUNCATE TABLE", TABLE, ALL) \
@ -166,6 +166,7 @@ enum class AccessType
M(dictGet, "dictHas, dictGetHierarchy, dictIsIn", DICTIONARY, ALL) /* allows to execute functions dictGet(), dictHas(), dictGetHierarchy(), dictIsIn() */\
\
M(addressToLine, "", GLOBAL, INTROSPECTION) /* allows to execute function addressToLine() */\
M(addressToLineWithInlines, "", GLOBAL, INTROSPECTION) /* allows to execute function addressToLineWithInlines() */\
M(addressToSymbol, "", GLOBAL, INTROSPECTION) /* allows to execute function addressToSymbol() */\
M(demangle, "", GLOBAL, INTROSPECTION) /* allows to execute function demangle() */\
M(INTROSPECTION, "INTROSPECTION FUNCTIONS", GROUP, ALL) /* allows to execute functions addressToLine(), addressToSymbol(), demangle()*/\

View File

@ -425,6 +425,7 @@ bool ContextAccess::checkAccessImplHelper(const AccessFlags & flags, const Args
| AccessType::TRUNCATE;
const AccessFlags dictionary_ddl = AccessType::CREATE_DICTIONARY | AccessType::DROP_DICTIONARY;
const AccessFlags function_ddl = AccessType::CREATE_FUNCTION | AccessType::DROP_FUNCTION;
const AccessFlags table_and_dictionary_ddl = table_ddl | dictionary_ddl;
const AccessFlags write_table_access = AccessType::INSERT | AccessType::OPTIMIZE;
const AccessFlags write_dcl_access = AccessType::ACCESS_MANAGEMENT - AccessType::SHOW_ACCESS;
@ -432,7 +433,7 @@ bool ContextAccess::checkAccessImplHelper(const AccessFlags & flags, const Args
const AccessFlags not_readonly_flags = write_table_access | table_and_dictionary_ddl | write_dcl_access | AccessType::SYSTEM | AccessType::KILL_QUERY;
const AccessFlags not_readonly_1_flags = AccessType::CREATE_TEMPORARY_TABLE;
const AccessFlags ddl_flags = table_ddl | dictionary_ddl;
const AccessFlags ddl_flags = table_ddl | dictionary_ddl | function_ddl;
const AccessFlags introspection_flags = AccessType::INTROSPECTION;
};
static const PrecalculatedFlags precalc;

View File

@ -7,8 +7,8 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <boost/range/algorithm/set_algorithm.hpp>
#include <boost/range/algorithm/sort.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
#include <base/sort.h>
namespace DB
@ -132,7 +132,7 @@ std::shared_ptr<ASTRolesOrUsersSet> RolesOrUsersSet::toAST() const
ast->names.reserve(ids.size());
for (const UUID & id : ids)
ast->names.emplace_back(::DB::toString(id));
boost::range::sort(ast->names);
::sort(ast->names.begin(), ast->names.end());
}
if (!except_ids.empty())
@ -140,7 +140,7 @@ std::shared_ptr<ASTRolesOrUsersSet> RolesOrUsersSet::toAST() const
ast->except_names.reserve(except_ids.size());
for (const UUID & except_id : except_ids)
ast->except_names.emplace_back(::DB::toString(except_id));
boost::range::sort(ast->except_names);
::sort(ast->except_names.begin(), ast->except_names.end());
}
return ast;
@ -161,7 +161,7 @@ std::shared_ptr<ASTRolesOrUsersSet> RolesOrUsersSet::toASTWithNames(const Access
if (name)
ast->names.emplace_back(std::move(*name));
}
boost::range::sort(ast->names);
::sort(ast->names.begin(), ast->names.end());
}
if (!except_ids.empty())
@ -173,7 +173,7 @@ std::shared_ptr<ASTRolesOrUsersSet> RolesOrUsersSet::toASTWithNames(const Access
if (except_name)
ast->except_names.emplace_back(std::move(*except_name));
}
boost::range::sort(ast->except_names);
::sort(ast->except_names.begin(), ast->except_names.end());
}
return ast;

View File

@ -45,7 +45,7 @@ TEST(AccessRights, Union)
lhs.grant(AccessType::INSERT);
rhs.grant(AccessType::ALL, "db1");
lhs.makeUnion(rhs);
ASSERT_EQ(lhs.toString(), "GRANT INSERT ON *.*, GRANT SHOW, SELECT, ALTER, CREATE DATABASE, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, CREATE FUNCTION, DROP, TRUNCATE, OPTIMIZE, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*");
ASSERT_EQ(lhs.toString(), "GRANT INSERT ON *.*, GRANT SHOW, SELECT, ALTER, CREATE DATABASE, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, DROP DATABASE, DROP TABLE, DROP VIEW, DROP DICTIONARY, TRUNCATE, OPTIMIZE, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*");
}

View File

@ -90,11 +90,11 @@ struct AggregateFunctionIntervalLengthSumData
void sort()
{
if (!sorted)
{
::sort(std::begin(segments), std::end(segments));
sorted = true;
}
if (sorted)
return;
::sort(std::begin(segments), std::end(segments));
sorted = true;
}
void serialize(WriteBuffer & buf) const

View File

@ -75,11 +75,11 @@ struct AggregateFunctionSequenceMatchData final
void sort()
{
if (!sorted)
{
::sort(std::begin(events_list), std::end(events_list), Comparator{});
sorted = true;
}
if (sorted)
return;
::sort(std::begin(events_list), std::end(events_list), Comparator{});
sorted = true;
}
void serialize(WriteBuffer & buf) const

View File

@ -239,6 +239,7 @@ private:
UInt64 genRandom(size_t lim)
{
assert(lim > 0);
/// With a large number of values, we will generate random numbers several times slower.
if (lim <= static_cast<UInt64>(rng.max()))
return static_cast<UInt32>(rng()) % static_cast<UInt32>(lim);

View File

@ -260,7 +260,8 @@ private:
if (sorted)
return;
::sort(samples.begin(), samples.end(), [](const auto & lhs, const auto & rhs) { return lhs.first < rhs.first; });
/// In order to provide deterministic result we must sort by value and hash
::sort(samples.begin(), samples.end(), [](const auto & lhs, const auto & rhs) { return lhs < rhs; });
sorted = true;
}

View File

@ -79,6 +79,7 @@ set(dbms_sources)
add_headers_and_sources(clickhouse_common_io Common)
add_headers_and_sources(clickhouse_common_io Common/HashTable)
add_headers_and_sources(clickhouse_common_io IO)
add_headers_and_sources(clickhouse_common_io IO/Archives)
add_headers_and_sources(clickhouse_common_io IO/S3)
list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp)
@ -508,6 +509,10 @@ if (TARGET ch_contrib::bzip2)
target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::bzip2)
endif()
if (TARGET ch_contrib::minizip)
target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::minizip)
endif ()
if (TARGET ch_contrib::simdjson)
dbms_target_link_libraries(PRIVATE ch_contrib::simdjson)
endif()

View File

@ -1317,7 +1317,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
if (insert && insert->select)
insert->tryFindInputFunction(input_function);
bool is_async_insert = global_context->getSettings().async_insert && insert && insert->hasInlinedData();
bool is_async_insert = global_context->getSettingsRef().async_insert && insert && insert->hasInlinedData();
/// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately.
if (insert && (!insert->select || input_function) && !insert->watch && !is_async_insert)
@ -1929,7 +1929,7 @@ void ClientBase::init(int argc, char ** argv)
/// Output of help message.
if (options.count("help")
|| (options.count("host") && options["host"].as<std::string>() == "elp")) /// If user writes -help instead of --help.
|| (options.count("host") && options["host"].as<std::vector<HostPort>>()[0].host == "elp")) /// If user writes -help instead of --help.
{
printHelpMessage(options_description);
exit(0);

View File

@ -5,6 +5,7 @@
#include <Common/InterruptListener.h>
#include <Common/ShellCommand.h>
#include <Common/Stopwatch.h>
#include <Common/DNSResolver.h>
#include <Core/ExternalTable.h>
#include <Poco/Util/Application.h>
#include <Interpreters/Context.h>
@ -243,6 +244,25 @@ protected:
} profile_events;
QueryProcessingStage::Enum query_processing_stage;
struct HostPort
{
String host;
std::optional<UInt16> port{};
friend std::istream & operator>>(std::istream & in, HostPort & hostPort)
{
String host_with_port;
in >> host_with_port;
DB::DNSResolver & resolver = DB::DNSResolver::instance();
std::pair<Poco::Net::IPAddress, std::optional<UInt16>>
host_and_port = resolver.resolveHostOrAddress(host_with_port);
hostPort.host = host_and_port.first.toString();
hostPort.port = host_and_port.second;
return in;
}
};
std::vector<HostPort> hosts_ports{};
};
}

View File

@ -23,15 +23,13 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfiguration & config)
ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfiguration & config,
std::string connection_host,
int connection_port) : host(connection_host), port(connection_port)
{
bool is_secure = config.getBool("secure", false);
security = is_secure ? Protocol::Secure::Enable : Protocol::Secure::Disable;
host = config.getString("host", "localhost");
port = config.getInt(
"port", config.getInt(is_secure ? "tcp_port_secure" : "tcp_port", is_secure ? DBMS_DEFAULT_SECURE_PORT : DBMS_DEFAULT_PORT));
default_database = config.getString("database", "");
/// changed the default value to "default" to fix the issue when the user in the prompt is blank
@ -61,12 +59,25 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
/// By default compression is disabled if address looks like localhost.
compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host)))
? Protocol::Compression::Enable : Protocol::Compression::Disable;
? Protocol::Compression::Enable : Protocol::Compression::Disable;
timeouts = ConnectionTimeouts(
Poco::Timespan(config.getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0),
Poco::Timespan(config.getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0),
Poco::Timespan(config.getInt("receive_timeout", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0),
Poco::Timespan(config.getInt("tcp_keep_alive_timeout", 0), 0));
Poco::Timespan(config.getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0),
Poco::Timespan(config.getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0),
Poco::Timespan(config.getInt("receive_timeout", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0),
Poco::Timespan(config.getInt("tcp_keep_alive_timeout", 0), 0));
}
/// Convenience constructor: pulls the host from the "host" config key
/// (defaulting to localhost) and the port via getPortFromConfig, then
/// delegates to the (config, host, port) constructor.
ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfiguration & config)
: ConnectionParameters(config, config.getString("host", "localhost"), getPortFromConfig(config))
{
}
/// Determines the TCP port to connect to from the configuration:
/// explicit "port" wins; otherwise "tcp_port_secure"/"tcp_port" depending on
/// the "secure" flag; otherwise the compiled-in defaults.
int ConnectionParameters::getPortFromConfig(const Poco::Util::AbstractConfiguration & config)
{
    const bool secure = config.getBool("secure", false);
    const char * fallback_key = secure ? "tcp_port_secure" : "tcp_port";
    const int fallback_port = secure ? DBMS_DEFAULT_SECURE_PORT : DBMS_DEFAULT_PORT;
    return config.getInt("port", config.getInt(fallback_key, fallback_port));
}
}

View File

@ -24,6 +24,9 @@ struct ConnectionParameters
ConnectionParameters() {}
ConnectionParameters(const Poco::Util::AbstractConfiguration & config);
ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, int port);
static int getPortFromConfig(const Poco::Util::AbstractConfiguration & config);
};
}

View File

@ -74,6 +74,8 @@ void LocalConnection::sendQuery(
query_context->setProgressCallback([this] (const Progress & value) { return this->updateProgress(value); });
query_context->setFileProgressCallback([this](const FileProgress & value) { this->updateProgress(Progress(value)); });
}
if (!current_database.empty())
query_context->setCurrentDatabase(current_database);
CurrentThread::QueryScope query_scope_holder(query_context);
@ -427,9 +429,9 @@ void LocalConnection::getServerVersion(
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented");
}
void LocalConnection::setDefaultDatabase(const String &)
void LocalConnection::setDefaultDatabase(const String & database)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented");
current_database = database;
}
UInt64 LocalConnection::getServerRevision(const ConnectionTimeouts &)

View File

@ -142,5 +142,7 @@ private:
/// Last "server" packet.
std::optional<UInt64> next_packet_type;
String current_database;
};
}

View File

@ -50,12 +50,12 @@ ColumnArray::ColumnArray(MutableColumnPtr && nested_column, MutableColumnPtr &&
if (!offsets_concrete)
throw Exception("offsets_column must be a ColumnUInt64", ErrorCodes::LOGICAL_ERROR);
if (!offsets_concrete->empty() && nested_column)
if (!offsets_concrete->empty() && data)
{
Offset last_offset = offsets_concrete->getData().back();
/// This will also prevent possible overflow in offset.
if (nested_column->size() != last_offset)
if (data->size() != last_offset)
throw Exception("offsets_column has data inconsistent with nested_column", ErrorCodes::LOGICAL_ERROR);
}

View File

@ -202,6 +202,45 @@ Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, U
return Poco::Net::SocketAddress(impl->cache_host(host).front(), port);
}
/// Parses and resolves a host specification in one of the forms:
///   "example.com", "127.0.0.1", "::1", "example.com:9000",
///   "127.0.0.1:9000", "[::1]:9000".
/// Returns the resolved IP address and the port, if one was present.
/// Throws BAD_ARGUMENTS on malformed input (unterminated bracket, missing
/// host/port delimiter, non-numeric or out-of-range port).
std::pair<Poco::Net::IPAddress, std::optional<UInt16>> DNSResolver::resolveHostOrAddress(const std::string & host_and_port)
{
    Poco::Net::IPAddress ip;

    size_t number_of_colons = std::count(host_and_port.begin(), host_and_port.end(), ':');
    if (number_of_colons > 1)
    {
        /// More than one ':' means an IPv6 address; a port can only be given
        /// with the bracketed form "[addr]:port".
        if (host_and_port.starts_with('['))
        {
            size_t close_bracket_pos = host_and_port.find(']');
            /// This string comes from user input, so a malformed value must not
            /// rely on assert() (compiled out in release builds) — report it.
            if (close_bracket_pos == std::string::npos)
                throw Exception("Missing closing bracket in host specification", ErrorCodes::BAD_ARGUMENTS);
            /// NOTE(review): the substring keeps the leading '[' — this assumes
            /// resolveHost accepts a bracketed IPv6 literal; confirm.
            ip = resolveHost(host_and_port.substr(0, close_bracket_pos));
            if (close_bracket_pos == host_and_port.size() - 1)
                return {ip, std::nullopt};
            if (host_and_port[close_bracket_pos + 1] != ':')
                throw Exception("Missing delimiter between host and port", ErrorCodes::BAD_ARGUMENTS);

            unsigned int port;
            if (!Poco::NumberParser::tryParseUnsigned(host_and_port.substr(close_bracket_pos + 2), port))
                throw Exception("Port must be numeric", ErrorCodes::BAD_ARGUMENTS);
            if (port > 0xFFFF)
                throw Exception("Port must be less 0xFFFF", ErrorCodes::BAD_ARGUMENTS);
            return {ip, port};
        }
        /// Bare IPv6 literal without brackets — no port possible.
        return {resolveHost(host_and_port), std::nullopt};
    }
    else if (number_of_colons == 1)
    {
        /// "host:port" with an IPv4 address or a host name.
        Poco::Net::SocketAddress socket = resolveAddress(host_and_port);
        return {socket.host(), socket.port()};
    }

    /// Plain host name or IPv4 address, no port.
    return {resolveHost(host_and_port), std::nullopt};
}
String DNSResolver::reverseResolve(const Poco::Net::IPAddress & address)
{
if (impl->disable_cache)

View File

@ -34,6 +34,10 @@ public:
Poco::Net::SocketAddress resolveAddress(const std::string & host, UInt16 port);
/// Accepts host names like 'example.com'/'example.com:port' or '127.0.0.1'/'127.0.0.1:port' or '::1'/'[::1]:port'
/// and resolves its IP and port, if port is set
std::pair<Poco::Net::IPAddress, std::optional<UInt16>> resolveHostOrAddress(const std::string & host_and_port);
/// Accepts host IP and resolves its host name
String reverseResolve(const Poco::Net::IPAddress & address);

View File

@ -610,6 +610,8 @@
M(639, SNAPPY_COMPRESS_FAILED) \
M(640, NO_HIVEMETASTORE) \
M(641, CANNOT_APPEND_TO_FILE) \
M(642, CANNOT_PACK_ARCHIVE) \
M(643, CANNOT_UNPACK_ARCHIVE) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \

View File

@ -281,6 +281,10 @@
M(ExternalDataSourceLocalCacheReadBytes, "Bytes read from local cache buffer in RemoteReadBufferCache")\
\
M(MainConfigLoads, "Number of times the main configuration was reloaded.") \
\
M(ScalarSubqueriesGlobalCacheHit, "Number of times a read from a scalar subquery was done using the global cache") \
M(ScalarSubqueriesLocalCacheHit, "Number of times a read from a scalar subquery was done using the local cache") \
M(ScalarSubqueriesCacheMiss, "Number of times a read from a scalar subquery was not cached and had to be calculated completely")
namespace ProfileEvents
{

View File

@ -17,6 +17,7 @@
#cmakedefine01 USE_YAML_CPP
#cmakedefine01 CLICKHOUSE_SPLIT_BINARY
#cmakedefine01 USE_BZIP2
#cmakedefine01 USE_MINIZIP
#cmakedefine01 USE_SNAPPY
#cmakedefine01 USE_HIVE
#cmakedefine01 USE_ODBC

View File

@ -105,7 +105,7 @@ void CachedCompressedReadBuffer::seek(size_t offset_in_compressed_file, size_t o
/// We will discard our working_buffer, but have to account rest bytes
bytes += offset();
/// No data, everything discarded
pos = working_buffer.end();
resetWorkingBuffer();
owned_cell.reset();
/// Remember required offset in decompressed block which will be set in

View File

@ -80,7 +80,7 @@ void CompressedReadBufferFromFile::seek(size_t offset_in_compressed_file, size_t
/// We will discard our working_buffer, but have to account rest bytes
bytes += offset();
/// No data, everything discarded
pos = working_buffer.end();
resetWorkingBuffer();
size_compressed = 0;
/// Remember required offset in decompressed block which will be set in
/// the next ReadBuffer::next() call
@ -113,7 +113,6 @@ size_t CompressedReadBufferFromFile::readBig(char * to, size_t n)
/// need to skip some bytes in decompressed data (seek happened before readBig call).
if (nextimpl_working_buffer_offset == 0 && size_decompressed + additional_size_at_the_end_of_buffer <= n - bytes_read)
{
decompressTo(to + bytes_read, size_decompressed, size_compressed_without_checksum);
bytes_read += size_decompressed;
bytes += size_decompressed;

View File

@ -36,6 +36,8 @@ private:
/// Allows to avoid additional copies in updateValue function
size_t snapshot_up_to_size = 0;
ArenaWithFreeLists arena;
/// Collect invalid iterators to avoid traversing the whole list
std::vector<Mapped> snapshot_invalid_iters;
uint64_t approximate_data_size{0};
@ -175,6 +177,7 @@ public:
list_itr->active_in_map = false;
auto new_list_itr = list.insert(list.end(), elem);
it->getMapped() = new_list_itr;
snapshot_invalid_iters.push_back(list_itr);
}
else
{
@ -195,6 +198,7 @@ public:
if (snapshot_mode)
{
list_itr->active_in_map = false;
snapshot_invalid_iters.push_back(list_itr);
list_itr->free_key = true;
map.erase(it->getKey());
}
@ -235,6 +239,7 @@ public:
{
auto elem_copy = *(list_itr);
list_itr->active_in_map = false;
snapshot_invalid_iters.push_back(list_itr);
updater(elem_copy.value);
auto itr = list.insert(list.end(), elem_copy);
it->getMapped() = itr;
@ -274,23 +279,15 @@ public:
void clearOutdatedNodes()
{
auto start = list.begin();
auto end = list.end();
for (auto itr = start; itr != end;)
for (auto & itr: snapshot_invalid_iters)
{
if (!itr->active_in_map)
{
updateDataSize(CLEAR_OUTDATED_NODES, itr->key.size, itr->value.sizeInBytes(), 0);
if (itr->free_key)
arena.free(const_cast<char *>(itr->key.data), itr->key.size);
itr = list.erase(itr);
}
else
{
assert(!itr->free_key);
itr++;
}
assert(!itr->active_in_map);
updateDataSize(CLEAR_OUTDATED_NODES, itr->key.size, itr->value.sizeInBytes(), 0);
if (itr->free_key)
arena.free(const_cast<char *>(itr->key.data), itr->key.size);
list.erase(itr);
}
snapshot_invalid_iters.clear();
}
void clear()
@ -310,7 +307,6 @@ public:
void disableSnapshotMode()
{
snapshot_mode = false;
snapshot_up_to_size = 0;
}

View File

@ -755,4 +755,30 @@ void materializeBlockInplace(Block & block)
block.getByPosition(i).column = recursiveRemoveSparse(block.getByPosition(i).column->convertToFullColumnIfConst());
}
/// Vertically concatenates blocks sharing one header into a single block.
/// Returns an empty block for an empty input. The result takes its column
/// structure from the first block (cloneEmpty) and appends rows from every
/// input block in order.
Block concatenateBlocks(const std::vector<Block> & blocks)
{
    if (blocks.empty())
        return {};

    /// Pre-compute the total height so each column reserves exactly once.
    size_t total_rows = 0;
    for (const auto & block : blocks)
        total_rows += block.rows();

    Block result = blocks.front().cloneEmpty();
    MutableColumns result_columns = result.mutateColumns();

    for (size_t column_idx = 0; column_idx < result_columns.size(); ++column_idx)
    {
        auto & destination = result_columns[column_idx];
        destination->reserve(total_rows);
        for (const auto & block : blocks)
            destination->insertRangeFrom(*block.getByPosition(column_idx).column, 0, block.rows());
    }

    result.setColumns(std::move(result_columns));
    return result;
}
}

View File

@ -203,4 +203,6 @@ ColumnPtr getColumnFromBlock(const Block & block, const NameAndTypePair & column
Block materializeBlock(const Block & block);
void materializeBlockInplace(Block & block);
Block concatenateBlocks(const std::vector<Block> & blocks);
}

View File

@ -85,16 +85,18 @@ void Settings::addProgramOptions(boost::program_options::options_description & o
{
for (const auto & field : all())
{
const std::string_view name = field.getName();
auto on_program_option
= boost::function1<void, const std::string &>([this, name](const std::string & value) { set(name, value); });
options.add(boost::shared_ptr<boost::program_options::option_description>(new boost::program_options::option_description(
name.data(),
boost::program_options::value<std::string>()->composing()->notifier(on_program_option),
field.getDescription())));
addProgramOption(options, field);
}
}
/// Registers one setting as a command-line option so that
/// "--<setting> <value>" forwards the string value into this Settings object.
void Settings::addProgramOption(boost::program_options::options_description & options, const SettingFieldRef & field)
{
    const std::string_view name = field.getName();

    /// Notifier invoked by program_options after parsing; applies the value.
    boost::function1<void, const std::string &> on_program_option(
        [this, name](const std::string & value) { set(name, value); });

    auto description = boost::shared_ptr<boost::program_options::option_description>(
        new boost::program_options::option_description(
            name.data(),
            boost::program_options::value<std::string>()->composing()->notifier(on_program_option),
            field.getDescription()));
    options.add(description);
}
void Settings::checkNoSettingNamesAtTopLevel(const Poco::Util::AbstractConfiguration & config, const String & config_path)
{
if (config.getBool("skip_check_for_incorrect_settings", false))

View File

@ -70,7 +70,9 @@ class IColumn;
M(UInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.", 0) \
M(UInt64, distributed_connections_pool_size, 1024, "Maximum number of connections with one remote server in the pool.", 0) \
M(UInt64, connections_with_failover_max_tries, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, "The maximum number of attempts to connect to replicas.", 0) \
M(UInt64, s3_min_upload_part_size, 32*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
M(UInt64, s3_min_upload_part_size, 16*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
M(UInt64, s3_upload_part_size_multiply_factor, 2, "Multiply s3_min_upload_part_size by this factor each time s3_multiply_parts_count_threshold parts were uploaded from a single write to S3.", 0) \
M(UInt64, s3_upload_part_size_multiply_parts_count_threshold, 1000, "Each time this number of parts was uploaded to S3 s3_min_upload_part_size multiplied by s3_upload_part_size_multiply_factor.", 0) \
M(UInt64, s3_max_single_part_upload_size, 32*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
M(UInt64, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.", 0) \
@ -262,6 +264,7 @@ class IColumn;
M(UInt64, http_max_fields, 1000000, "Maximum number of fields in HTTP header", 0) \
M(UInt64, http_max_field_name_size, 1048576, "Maximum length of field name in HTTP header", 0) \
M(UInt64, http_max_field_value_size, 1048576, "Maximum length of field value in HTTP header", 0) \
M(Bool, http_skip_not_found_url_for_globs, true, "Skip url's for globs with HTTP_NOT_FOUND error", 0) \
M(Bool, optimize_throw_if_noop, false, "If setting is enabled and OPTIMIZE query didn't actually assign a merge then an explanatory exception is thrown", 0) \
M(Bool, use_index_for_in_with_subqueries, true, "Try using an index if there is a subquery or a table expression on the right side of the IN operator.", 0) \
M(Bool, joined_subquery_requires_alias, true, "Force joined subqueries and table functions to have aliases for correct name qualification.", 0) \
@ -610,6 +613,7 @@ class IColumn;
M(Char, input_format_hive_text_collection_items_delimiter, '\x02', "Delimiter between collection(array or map) items in Hive Text File", 0) \
M(Char, input_format_hive_text_map_keys_delimiter, '\x03', "Delimiter between a pair of map key/values in Hive Text File", 0) \
M(UInt64, input_format_msgpack_number_of_columns, 0, "The number of columns in inserted MsgPack data. Used for automatic schema inference from data.", 0) \
M(MsgPackUUIDRepresentation, output_format_msgpack_uuid_representation, FormatSettings::MsgPackUUIDRepresentation::EXT, "The way how to output UUID in MsgPack format.", 0) \
M(UInt64, input_format_max_rows_to_read_for_schema_inference, 100, "The maximum rows of data to read for automatic schema inference", 0) \
\
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.", 0) \
@ -717,6 +721,8 @@ struct Settings : public BaseSettings<SettingsTraits>, public IHints<2, Settings
static void checkNoSettingNamesAtTopLevel(const Poco::Util::AbstractConfiguration & config, const String & config_path);
std::vector<String> getAllRegisteredNames() const override;
void addProgramOption(boost::program_options::options_description & options, const SettingFieldRef & field);
};
/*

View File

@ -130,4 +130,10 @@ IMPLEMENT_SETTING_ENUM(EscapingRule, ErrorCodes::BAD_ARGUMENTS,
{"JSON", FormatSettings::EscapingRule::JSON},
{"XML", FormatSettings::EscapingRule::XML},
{"Raw", FormatSettings::EscapingRule::Raw}})
IMPLEMENT_SETTING_ENUM(MsgPackUUIDRepresentation , ErrorCodes::BAD_ARGUMENTS,
{{"bin", FormatSettings::MsgPackUUIDRepresentation::BIN},
{"str", FormatSettings::MsgPackUUIDRepresentation::STR},
{"ext", FormatSettings::MsgPackUUIDRepresentation::EXT}})
}

View File

@ -172,4 +172,6 @@ DECLARE_SETTING_ENUM_WITH_RENAME(EnumComparingMode, FormatSettings::EnumComparin
DECLARE_SETTING_ENUM_WITH_RENAME(EscapingRule, FormatSettings::EscapingRule)
DECLARE_SETTING_ENUM_WITH_RENAME(MsgPackUUIDRepresentation, FormatSettings::MsgPackUUIDRepresentation)
}

View File

@ -13,7 +13,7 @@ private:
DataTypePtr dictionary_type;
public:
DataTypeLowCardinality(DataTypePtr dictionary_type_);
explicit DataTypeLowCardinality(DataTypePtr dictionary_type_);
const DataTypePtr & getDictionaryType() const { return dictionary_type; }

View File

@ -167,8 +167,10 @@ String getNameForSubstreamPath(
/// Because nested data may be represented not by Array of Tuple,
/// but by separate Array columns with names in a form of a.b,
/// and name is encoded as a whole.
stream_name += (escape_tuple_delimiter && it->escape_tuple_delimiter ?
escapeForFileName(".") : ".") + escapeForFileName(it->tuple_element_name);
if (escape_tuple_delimiter && it->escape_tuple_delimiter)
stream_name += escapeForFileName("." + it->tuple_element_name);
else
stream_name += "." + it->tuple_element_name;
}
}

View File

@ -78,7 +78,9 @@ ASTPtr DatabaseMemory::getCreateDatabaseQuery() const
auto create_query = std::make_shared<ASTCreateQuery>();
create_query->setDatabase(getDatabaseName());
create_query->set(create_query->storage, std::make_shared<ASTStorage>());
create_query->storage->set(create_query->storage->engine, makeASTFunction(getEngineName()));
auto engine = makeASTFunction(getEngineName());
engine->no_empty_args = true;
create_query->storage->set(create_query->storage->engine, engine);
if (const auto comment_value = getDatabaseComment(); !comment_value.empty())
create_query->set(create_query->comment, std::make_shared<ASTLiteral>(comment_value));

View File

@ -316,7 +316,7 @@ getTableOutput(const String & database_name, const String & table_name, ContextM
return std::move(res.pipeline);
}
static inline String reWriteMysqlQueryColumn(mysqlxx::Pool::Entry & connection, const String & database_name, const String & table_name, const Settings & global_settings)
static inline String rewriteMysqlQueryColumn(mysqlxx::Pool::Entry & connection, const String & database_name, const String & table_name, const Settings & global_settings)
{
Block tables_columns_sample_block
{
@ -376,7 +376,7 @@ static inline void dumpDataForTables(
auto pipeline = getTableOutput(database_name, table_name, query_context);
StreamSettings mysql_input_stream_settings(context->getSettingsRef());
String mysql_select_all_query = "SELECT " + reWriteMysqlQueryColumn(connection, mysql_database_name, table_name, context->getSettings()) + " FROM "
String mysql_select_all_query = "SELECT " + rewriteMysqlQueryColumn(connection, mysql_database_name, table_name, context->getSettingsRef()) + " FROM "
+ backQuoteIfNeed(mysql_database_name) + "." + backQuoteIfNeed(table_name);
LOG_INFO(&Poco::Logger::get("MaterializedMySQLSyncThread(" + database_name + ")"), "mysql_select_all_query is {}", mysql_select_all_query);
auto input = std::make_unique<MySQLSource>(connection, mysql_select_all_query, pipeline.getHeader(), mysql_input_stream_settings);

View File

@ -197,7 +197,7 @@ void registerDictionarySourceExecutablePool(DictionarySourceFactory & factory)
size_t max_command_execution_time = config.getUInt64(settings_config_prefix + ".max_command_execution_time", 10);
size_t max_execution_time_seconds = static_cast<size_t>(context->getSettings().max_execution_time.totalSeconds());
size_t max_execution_time_seconds = static_cast<size_t>(context->getSettingsRef().max_execution_time.totalSeconds());
if (max_execution_time_seconds != 0 && max_command_execution_time > max_execution_time_seconds)
max_command_execution_time = max_execution_time_seconds;

View File

@ -243,7 +243,7 @@ off_t AsynchronousReadIndirectBufferFromRemoteFS::seek(off_t offset_, int whence
prefetch_future = {};
}
pos = working_buffer.end();
resetWorkingBuffer();
/**
* Lazy ignore. Save number of bytes to ignore and ignore it either for prefetch buffer or current buffer.

View File

@ -64,7 +64,7 @@ off_t ReadIndirectBufferFromRemoteFS::seek(off_t offset_, int whence)
throw Exception("Only SEEK_SET or SEEK_CUR modes are allowed.", ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
impl->reset();
pos = working_buffer.end();
resetWorkingBuffer();
return impl->file_offset_of_buffer_end;
}

View File

@ -283,6 +283,8 @@ std::unique_ptr<WriteBufferFromFileBase> DiskS3::writeFile(const String & path,
bucket,
metadata.remote_fs_root_path + s3_path,
settings->s3_min_upload_part_size,
settings->s3_upload_part_size_multiply_factor,
settings->s3_upload_part_size_multiply_parts_count_threshold,
settings->s3_max_single_part_upload_size,
std::move(object_metadata),
buf_size,
@ -338,6 +340,8 @@ void DiskS3::createFileOperationObject(const String & operation_name, UInt64 rev
bucket,
remote_fs_root_path + key,
settings->s3_min_upload_part_size,
settings->s3_upload_part_size_multiply_factor,
settings->s3_upload_part_size_multiply_parts_count_threshold,
settings->s3_max_single_part_upload_size,
metadata);
@ -417,6 +421,8 @@ void DiskS3::saveSchemaVersion(const int & version)
bucket,
remote_fs_root_path + SCHEMA_VERSION_OBJECT,
settings->s3_min_upload_part_size,
settings->s3_upload_part_size_multiply_factor,
settings->s3_upload_part_size_multiply_parts_count_threshold,
settings->s3_max_single_part_upload_size);
writeIntText(version, buffer);
@ -1076,6 +1082,8 @@ DiskS3Settings::DiskS3Settings(
const std::shared_ptr<Aws::S3::S3Client> & client_,
size_t s3_max_single_read_retries_,
size_t s3_min_upload_part_size_,
size_t s3_upload_part_size_multiply_factor_,
size_t s3_upload_part_size_multiply_parts_count_threshold_,
size_t s3_max_single_part_upload_size_,
size_t min_bytes_for_seek_,
bool send_metadata_,
@ -1085,6 +1093,8 @@ DiskS3Settings::DiskS3Settings(
: client(client_)
, s3_max_single_read_retries(s3_max_single_read_retries_)
, s3_min_upload_part_size(s3_min_upload_part_size_)
, s3_upload_part_size_multiply_factor(s3_upload_part_size_multiply_factor_)
, s3_upload_part_size_multiply_parts_count_threshold(s3_upload_part_size_multiply_parts_count_threshold_)
, s3_max_single_part_upload_size(s3_max_single_part_upload_size_)
, min_bytes_for_seek(min_bytes_for_seek_)
, send_metadata(send_metadata_)

View File

@ -29,6 +29,8 @@ struct DiskS3Settings
const std::shared_ptr<Aws::S3::S3Client> & client_,
size_t s3_max_single_read_retries_,
size_t s3_min_upload_part_size_,
size_t s3_upload_part_size_multiply_factor_,
size_t s3_upload_part_size_multiply_parts_count_threshold_,
size_t s3_max_single_part_upload_size_,
size_t min_bytes_for_seek_,
bool send_metadata_,
@ -39,6 +41,8 @@ struct DiskS3Settings
std::shared_ptr<Aws::S3::S3Client> client;
size_t s3_max_single_read_retries;
size_t s3_min_upload_part_size;
size_t s3_upload_part_size_multiply_factor;
size_t s3_upload_part_size_multiply_parts_count_threshold;
size_t s3_max_single_part_upload_size;
size_t min_bytes_for_seek;
bool send_metadata;

View File

@ -155,6 +155,8 @@ std::unique_ptr<DiskS3Settings> getSettings(const Poco::Util::AbstractConfigurat
getClient(config, config_prefix, context),
config.getUInt64(config_prefix + ".s3_max_single_read_retries", context->getSettingsRef().s3_max_single_read_retries),
config.getUInt64(config_prefix + ".s3_min_upload_part_size", context->getSettingsRef().s3_min_upload_part_size),
config.getUInt64(config_prefix + ".s3_upload_part_size_multiply_factor", context->getSettingsRef().s3_upload_part_size_multiply_factor),
config.getUInt64(config_prefix + ".s3_upload_part_size_multiply_parts_count_threshold", context->getSettingsRef().s3_upload_part_size_multiply_parts_count_threshold),
config.getUInt64(config_prefix + ".s3_max_single_part_upload_size", context->getSettingsRef().s3_max_single_part_upload_size),
config.getUInt64(config_prefix + ".min_bytes_for_seek", 1024 * 1024),
config.getBool(config_prefix + ".send_metadata", false),

View File

@ -129,6 +129,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
format_settings.capn_proto.enum_comparing_mode = settings.format_capn_proto_enum_comparising_mode;
format_settings.seekable_read = settings.input_format_allow_seeks;
format_settings.msgpack.number_of_columns = settings.input_format_msgpack_number_of_columns;
format_settings.msgpack.output_uuid_representation = settings.output_format_msgpack_uuid_representation;
format_settings.max_rows_to_read_for_schema_inference = settings.input_format_max_rows_to_read_for_schema_inference;
/// Validate avro_schema_registry_url with RemoteHostFilter when non-empty and in Server context

View File

@ -231,9 +231,17 @@ struct FormatSettings
EnumComparingMode enum_comparing_mode = EnumComparingMode::BY_VALUES;
} capn_proto;
/// How UUID values are serialized by the MsgPack output format
/// (selected via the output_format_msgpack_uuid_representation setting).
enum class MsgPackUUIDRepresentation
{
STR, // Output UUID as a string of 36 characters.
BIN, // Output UUID as 16-bytes binary.
EXT, // Output UUID as ExtType = 2
};

/// MsgPack format settings.
struct
{
UInt64 number_of_columns = 0; // Column count of inserted MsgPack data; used for automatic schema inference.
MsgPackUUIDRepresentation output_uuid_representation = MsgPackUUIDRepresentation::EXT;
} msgpack;
};

View File

@ -0,0 +1,11 @@
#pragma once
namespace DB
{
/// MessagePack extension-type tags used by ClickHouse's MsgPack format.
enum class MsgPackExtensionTypes
{
UUID = 0x02, // UUID carried as ext type 2 (MsgPackUUIDRepresentation::EXT).
};
}

View File

@ -1,153 +1,58 @@
#if defined(__ELF__) && !defined(__FreeBSD__)
#include <Common/Dwarf.h>
#include <Common/SymbolIndex.h>
#include <Common/HashTable/HashMap.h>
#include <Common/Arena.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteHelpers.h>
#include <Access/Common/AccessFlags.h>
#include <Interpreters/Context.h>
#include <mutex>
#include <filesystem>
#include <unordered_map>
#include <Functions/addressToLine.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
namespace
{
class FunctionAddressToLine : public IFunction
class FunctionAddressToLine: public FunctionAddressToLineBase<StringRef, Dwarf::LocationInfoMode::FAST>
{
public:
static constexpr auto name = "addressToLine";
String getName() const override { return name; }
static FunctionPtr create(ContextPtr context)
{
context->checkAccess(AccessType::addressToLine);
return std::make_shared<FunctionAddressToLine>();
}
String getName() const override
protected:
DataTypePtr getDataType() const override
{
return name;
}
size_t getNumberOfArguments() const override
{
return 1;
}
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.size() != 1)
throw Exception("Function " + getName() + " needs exactly one argument; passed "
+ toString(arguments.size()) + ".", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
const auto & type = arguments[0].type;
if (!WhichDataType(type.get()).isUInt64())
throw Exception("The only argument for function " + getName() + " must be UInt64. Found "
+ type->getName() + " instead.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
return std::make_shared<DataTypeString>();
}
bool useDefaultImplementationForConstants() const override
ColumnPtr getResultColumn(const typename ColumnVector<UInt64>::Container & data, size_t input_rows_count) const override
{
return true;
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const ColumnPtr & column = arguments[0].column;
const ColumnUInt64 * column_concrete = checkAndGetColumn<ColumnUInt64>(column.get());
if (!column_concrete)
throw Exception("Illegal column " + column->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN);
const typename ColumnVector<UInt64>::Container & data = column_concrete->getData();
auto result_column = ColumnString::create();
for (size_t i = 0; i < input_rows_count; ++i)
{
StringRef res_str = implCached(data[i]);
result_column->insertData(res_str.data, res_str.size);
}
return result_column;
}
private:
struct Cache
void setResult(StringRef & result, const Dwarf::LocationInfo & location, const std::vector<Dwarf::SymbolizedFrame> &) const override
{
std::mutex mutex;
Arena arena;
using Map = HashMap<uintptr_t, StringRef>;
Map map;
std::unordered_map<std::string, Dwarf> dwarfs;
};
const char * arena_begin = nullptr;
WriteBufferFromArena out(cache.arena, arena_begin);
mutable Cache cache;
writeString(location.file.toString(), out);
writeChar(':', out);
writeIntText(location.line, out);
StringRef impl(uintptr_t addr) const
{
auto symbol_index_ptr = SymbolIndex::instance();
const SymbolIndex & symbol_index = *symbol_index_ptr;
if (const auto * object = symbol_index.findObject(reinterpret_cast<const void *>(addr)))
{
auto dwarf_it = cache.dwarfs.try_emplace(object->name, object->elf).first;
if (!std::filesystem::exists(object->name))
return {};
Dwarf::LocationInfo location;
std::vector<Dwarf::SymbolizedFrame> frames; // NOTE: not used in FAST mode.
if (dwarf_it->second.findAddress(addr - uintptr_t(object->address_begin), location, Dwarf::LocationInfoMode::FAST, frames))
{
const char * arena_begin = nullptr;
WriteBufferFromArena out(cache.arena, arena_begin);
writeString(location.file.toString(), out);
writeChar(':', out);
writeIntText(location.line, out);
return out.complete();
}
else
{
return object->name;
}
}
else
return {};
}
StringRef implCached(uintptr_t addr) const
{
Cache::Map::LookupResult it;
bool inserted;
std::lock_guard lock(cache.mutex);
cache.map.emplace(addr, it, inserted);
if (inserted)
it->getMapped() = impl(addr);
return it->getMapped();
result = out.complete();
}
};

View File

@ -0,0 +1,133 @@
#pragma once
#if defined(__ELF__) && !defined(__FreeBSD__)
#include <Common/Dwarf.h>
#include <Common/SymbolIndex.h>
#include <Common/HashTable/HashMap.h>
#include <Common/Arena.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteHelpers.h>
#include <Access/Common/AccessFlags.h>
#include <Interpreters/Context.h>
#include <mutex>
#include <filesystem>
#include <unordered_map>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
/// Common base for functions that map an instruction address (UInt64) to source
/// location information via DWARF debug info (addressToLine and its variants).
/// ResultT is the per-address result type; locationInfoMode selects how much
/// DWARF data is resolved (FAST vs. FULL_WITH_INLINE).
template <typename ResultT, Dwarf::LocationInfoMode locationInfoMode>
class FunctionAddressToLineBase : public IFunction
{
public:
    size_t getNumberOfArguments() const override { return 1; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    /// Validates that the single argument is UInt64 and delegates the concrete
    /// return type to the derived class.
    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        if (arguments.size() != 1)
            throw Exception(
                "Function " + getName() + " needs exactly one argument; passed " + toString(arguments.size()) + ".",
                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
        const auto & type = arguments[0].type;

        if (!WhichDataType(type.get()).isUInt64())
            throw Exception(
                "The only argument for function " + getName() + " must be UInt64. Found " + type->getName() + " instead.",
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        return getDataType();
    }

    bool useDefaultImplementationForConstants() const override { return true; }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const ColumnPtr & column = arguments[0].column;
        const ColumnUInt64 * column_concrete = checkAndGetColumn<ColumnUInt64>(column.get());

        if (!column_concrete)
            throw Exception(
                "Illegal column " + column->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN);

        const typename ColumnVector<UInt64>::Container & data = column_concrete->getData();
        return getResultColumn(data, input_rows_count);
    }

protected:
    /// Concrete result data type (e.g. String or Array(String)).
    virtual DataTypePtr getDataType() const = 0;
    /// Builds the full result column from the raw address values.
    virtual ColumnPtr getResultColumn(const typename ColumnVector<UInt64>::Container & data, size_t input_rows_count) const = 0;
    /// Converts one resolved location (plus inline frames, if any) into ResultT.
    virtual void
    setResult(ResultT & result, const Dwarf::LocationInfo & location, const std::vector<Dwarf::SymbolizedFrame> & frames) const = 0;

    /// Per-function-instance cache shared across rows/queries.
    /// All fields are guarded by `mutex` (implCached takes the lock).
    struct Cache
    {
        std::mutex mutex;
        Arena arena;                                 /// Backing storage for StringRef results.
        using Map = HashMap<uintptr_t, ResultT>;
        Map map;                                     /// addr -> memoized result.
        std::unordered_map<std::string, Dwarf> dwarfs;  /// Parsed DWARF info per object file.
    };

    mutable Cache cache;

    /// Resolves one address. Returns {} if the address does not belong to any
    /// known object or the object file no longer exists on disk; returns the
    /// object name alone if DWARF lookup fails.
    ResultT impl(uintptr_t addr) const
    {
        auto symbol_index_ptr = SymbolIndex::instance();
        const SymbolIndex & symbol_index = *symbol_index_ptr;

        if (const auto * object = symbol_index.findObject(reinterpret_cast<const void *>(addr)))
        {
            auto dwarf_it = cache.dwarfs.try_emplace(object->name, object->elf).first;
            if (!std::filesystem::exists(object->name))
                return {};

            Dwarf::LocationInfo location;
            std::vector<Dwarf::SymbolizedFrame> frames; // NOTE: not used in FAST mode.
            ResultT result;
            /// Addresses are translated to offsets relative to the object's load address.
            if (dwarf_it->second.findAddress(addr - uintptr_t(object->address_begin), location, locationInfoMode, frames))
            {
                setResult(result, location, frames);
                return result;
            }
            else
                return {object->name};
        }
        else
            return {};
    }

    /// Memoizing wrapper around impl(); serializes all lookups via cache.mutex.
    ResultT implCached(uintptr_t addr) const
    {
        typename Cache::Map::LookupResult it;
        bool inserted;
        std::lock_guard lock(cache.mutex);
        cache.map.emplace(addr, it, inserted);
        if (inserted)
            it->getMapped() = impl(addr);
        return it->getMapped();
    }
};
}
#endif

View File

@ -0,0 +1,99 @@
#if defined(__ELF__) && !defined(__FreeBSD__)
#include <Common/Dwarf.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnArray.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
#include <Functions/FunctionFactory.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteHelpers.h>
#include <Access/Common/AccessFlags.h>
#include <Functions/addressToLine.h>
#include <vector>
namespace DB
{
namespace
{
/// addressToLineWithInlines(address) -> Array(String).
/// Like addressToLine, but also reports inlined frames: the first element is the
/// outermost "file:line" location, followed by "file:line:demangled_name" for
/// each inline frame resolved from DWARF (FULL_WITH_INLINE mode).
class FunctionAddressToLineWithInlines: public FunctionAddressToLineBase<StringRefs, Dwarf::LocationInfoMode::FULL_WITH_INLINE>
{
public:
    static constexpr auto name = "addressToLineWithInlines";
    String getName() const override { return name; }

    static FunctionPtr create(ContextPtr context)
    {
        /// Introspection functions require an explicit access grant.
        context->checkAccess(AccessType::addressToLineWithInlines);
        return std::make_shared<FunctionAddressToLineWithInlines>();
    }

protected:
    DataTypePtr getDataType() const override
    {
        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
    }

    /// Flattens the per-address string lists into one Array(String) column.
    ColumnPtr getResultColumn(const typename ColumnVector<UInt64>::Container & data, size_t input_rows_count) const override
    {
        auto result_column = ColumnArray::create(ColumnString::create());
        ColumnString & result_strings = typeid_cast<ColumnString &>(result_column->getData());
        ColumnArray::Offsets & result_offsets = result_column->getOffsets();

        ColumnArray::Offset current_offset = 0;

        for (size_t i = 0; i < input_rows_count; ++i)
        {
            StringRefs res = implCached(data[i]);
            for (auto & r : res)
                result_strings.insertData(r.data, r.size);
            current_offset += res.size();
            result_offsets.push_back(current_offset);
        }

        return result_column;
    }

    void setResult(StringRefs & result, const Dwarf::LocationInfo & location, const std::vector<Dwarf::SymbolizedFrame> & inline_frames) const override
    {
        /// Outer location first (no frame -> no symbol name appended), then each inline frame.
        appendLocationToResult(result, location, nullptr);
        for (const auto & inline_frame : inline_frames)
            appendLocationToResult(result, inline_frame.location, &inline_frame);
    }

private:
    /// Formats "file:line" (plus ":demangled_name" when `frame` is given) into
    /// the shared cache arena and appends the resulting StringRef.
    /// NOTE(review): callers hold cache.mutex via implCached, which presumably
    /// protects the arena here — confirm if this is ever called on another path.
    inline ALWAYS_INLINE void appendLocationToResult(StringRefs & result, const Dwarf::LocationInfo & location, const Dwarf::SymbolizedFrame * frame) const
    {
        const char * arena_begin = nullptr;
        WriteBufferFromArena out(cache.arena, arena_begin);

        writeString(location.file.toString(), out);
        writeChar(':', out);
        writeIntText(location.line, out);

        if (frame)
        {
            writeChar(':', out);
            int status = 0;
            writeString(demangle(frame->name, status), out);
        }

        result.emplace_back(out.complete());
    }
};
}
/// Registers addressToLineWithInlines in the function factory.
void registerFunctionAddressToLineWithInlines(FunctionFactory & factory)
{
    factory.registerFunction<FunctionAddressToLineWithInlines>();
}
}
#endif

View File

@ -1,6 +1,5 @@
#include <algorithm>
#include <vector>
#include <base/sort.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include "arrayScalarProduct.h"
@ -113,7 +112,8 @@ public:
sorted_labels[i].label = label;
}
::sort(sorted_labels.begin(), sorted_labels.end(), [](const auto & lhs, const auto & rhs) { return lhs.score > rhs.score; });
/// Stable sort is required for labels to be applied in the same order when scores are equal
std::stable_sort(sorted_labels.begin(), sorted_labels.end(), [](const auto & lhs, const auto & rhs) { return lhs.score > rhs.score; });
/// We will first calculate non-normalized area.

View File

@ -19,6 +19,7 @@ namespace DB
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -66,8 +67,25 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_lon = arguments[0].column.get();
const auto * col_lat = arguments[1].column.get();
const auto * col_lon = checkAndGetColumn<ColumnFloat64>(arguments[0].column.get());
if (!col_lon)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be Float64",
arguments[0].type->getName(),
1,
getName());
const auto & data_col_lon = col_lon->getData();
const auto * col_lat = checkAndGetColumn<ColumnFloat64>(arguments[1].column.get());
if (!col_lat)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be Float64",
arguments[0].type->getName(),
2,
getName());
const auto & data_col_lat = col_lat->getData();
auto dst = ColumnVector<UInt64>::create();
auto & dst_data = dst->getData();
@ -75,16 +93,14 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const Float64 lon = col_lon->getFloat64(row);
const Float64 lat = col_lat->getFloat64(row);
const Float64 lon = data_col_lon[row];
const Float64 lat = data_col_lat[row];
if (isNaN(lon) || isNaN(lat))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Arguments must not be NaN");
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments must not be NaN");
if (!(isFinite(lon) && isFinite(lat)))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Arguments must not be infinite");
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments must not be infinite");
/// S2 accepts a point as (latitude, longitude)
S2LatLng lat_lng = S2LatLng::FromDegrees(lat, lon);
@ -95,7 +111,6 @@ public:
return dst;
}
};
}
@ -105,7 +120,6 @@ void registerFunctionGeoToS2(FunctionFactory & factory)
factory.registerFunction<FunctionGeoToS2>();
}
}
#endif

View File

@ -6,6 +6,7 @@ class FunctionFactory;
#if defined(OS_LINUX)
void registerFunctionAddressToSymbol(FunctionFactory & factory);
void registerFunctionAddressToLine(FunctionFactory & factory);
void registerFunctionAddressToLineWithInlines(FunctionFactory & factory);
#endif
void registerFunctionDemangle(FunctionFactory & factory);
@ -17,6 +18,7 @@ void registerFunctionsIntrospection(FunctionFactory & factory)
#if defined(OS_LINUX)
registerFunctionAddressToSymbol(factory);
registerFunctionAddressToLine(factory);
registerFunctionAddressToLineWithInlines(factory);
#endif
registerFunctionDemangle(factory);
registerFunctionTrap(factory);

View File

@ -20,6 +20,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -83,19 +84,47 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_center = arguments[0].column.get();
const auto * col_degrees = arguments[1].column.get();
const auto * col_point = arguments[2].column.get();
const auto * col_center = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_center)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_center = col_center->getData();
const auto * col_degrees = checkAndGetColumn<ColumnFloat64>(arguments[1].column.get());
if (!col_degrees)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be Float64",
arguments[1].type->getName(),
2,
getName());
const auto & data_degrees = col_degrees->getData();
const auto * col_point = checkAndGetColumn<ColumnUInt64>(arguments[2].column.get());
if (!col_point)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[2].type->getName(),
3,
getName());
const auto & data_point = col_point->getData();
auto dst = ColumnUInt8::create();
auto & dst_data = dst->getData();
dst_data.reserve(input_rows_count);
for (size_t row=0 ; row < input_rows_count; ++row)
for (size_t row = 0; row < input_rows_count; ++row)
{
const auto center = S2CellId(col_center->getUInt(row));
const Float64 degrees = col_degrees->getFloat64(row);
const auto point = S2CellId(col_point->getUInt(row));
const auto center = S2CellId(data_center[row]);
const Float64 degrees = data_degrees[row];
const auto point = S2CellId(data_point[row]);
if (isNaN(degrees))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Radius of the cap must not be nan");

View File

@ -20,6 +20,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -81,10 +82,45 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_center1 = arguments[0].column.get();
const auto * col_radius1 = arguments[1].column.get();
const auto * col_center2 = arguments[2].column.get();
const auto * col_radius2 = arguments[3].column.get();
const auto * col_center1 = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_center1)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_center1 = col_center1->getData();
const auto * col_radius1 = checkAndGetColumn<ColumnFloat64>(arguments[1].column.get());
if (!col_radius1)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be Float64",
arguments[1].type->getName(),
2,
getName());
const auto & data_radius1 = col_radius1->getData();
const auto * col_center2 = checkAndGetColumn<ColumnUInt64>(arguments[2].column.get());
if (!col_center2)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[2].type->getName(),
3,
getName());
const auto & data_center2 = col_center2->getData();
const auto * col_radius2 = checkAndGetColumn<ColumnFloat64>(arguments[3].column.get());
if (!col_radius2)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be Float64",
arguments[3].type->getName(),
4,
getName());
const auto & data_radius2 = col_radius2->getData();
auto col_res_center = ColumnUInt64::create();
auto col_res_radius = ColumnFloat64::create();
@ -97,10 +133,10 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const UInt64 first_center = col_center1->getUInt(row);
const Float64 first_radius = col_radius1->getFloat64(row);
const UInt64 second_center = col_center2->getUInt(row);
const Float64 second_radius = col_radius2->getFloat64(row);
const UInt64 first_center = data_center1[row];
const Float64 first_radius = data_radius1[row];
const UInt64 second_center = data_center2[row];
const Float64 second_radius = data_radius2[row];
if (isNaN(first_radius) || isNaN(second_radius))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Radius of the cap must not be nan");
@ -125,7 +161,6 @@ public:
return ColumnTuple::create(Columns{std::move(col_res_center), std::move(col_res_radius)});
}
};
}
@ -135,7 +170,6 @@ void registerFunctionS2CapUnion(FunctionFactory & factory)
factory.registerFunction<FunctionS2CapUnion>();
}
}
#endif

View File

@ -19,6 +19,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -65,8 +66,25 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_id_first = arguments[0].column.get();
const auto * col_id_second = arguments[1].column.get();
const auto * col_id_first = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_id_first)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_id_first = col_id_first->getData();
const auto * col_id_second = checkAndGetColumn<ColumnUInt64>(arguments[1].column.get());
if (!col_id_second)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[1].type->getName(),
2,
getName());
const auto & data_id_second = col_id_second->getData();
auto dst = ColumnUInt8::create();
auto & dst_data = dst->getData();
@ -74,8 +92,8 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const UInt64 id_first = col_id_first->getInt(row);
const UInt64 id_second = col_id_second->getInt(row);
const UInt64 id_first = data_id_first[row];
const UInt64 id_second = data_id_second[row];
auto first_cell = S2CellId(id_first);
auto second_cell = S2CellId(id_second);

View File

@ -19,6 +19,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -56,15 +57,25 @@ public:
if (!WhichDataType(arg).isUInt64())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument {} of function {}. Must be Float64",
arg->getName(), 1, getName());
"Illegal type {} of argument {} of function {}. Must be UInt64",
arg->getName(),
1,
getName());
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>());
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_id = arguments[0].column.get();
const auto * col_id = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_id)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_id = col_id->getData();
auto dst = ColumnArray::create(ColumnUInt64::create());
auto & dst_data = dst->getData();
@ -74,7 +85,7 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const UInt64 id = col_id->getUInt(row);
const UInt64 id = data_id[row];
S2CellId cell_id(id);

View File

@ -19,6 +19,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -64,9 +65,35 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_lo = arguments[0].column.get();
const auto * col_hi = arguments[1].column.get();
const auto * col_point = arguments[2].column.get();
const auto * col_lo = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_lo)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_low = col_lo->getData();
const auto * col_hi = checkAndGetColumn<ColumnUInt64>(arguments[1].column.get());
if (!col_hi)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[1].type->getName(),
2,
getName());
const auto & data_hi = col_hi->getData();
const auto * col_point = checkAndGetColumn<ColumnUInt64>(arguments[2].column.get());
if (!col_point)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[2].type->getName(),
3,
getName());
const auto & data_point = col_point->getData();
auto col_res_first = ColumnUInt64::create();
auto col_res_second = ColumnUInt64::create();
@ -79,9 +106,9 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const auto lo = S2CellId(col_lo->getUInt(row));
const auto hi = S2CellId(col_hi->getUInt(row));
const auto point = S2CellId(col_point->getUInt(row));
const auto lo = S2CellId(data_low[row]);
const auto hi = S2CellId(data_hi[row]);
const auto point = S2CellId(data_point[row]);
if (!lo.is_valid() || !hi.is_valid())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Rectangle is not valid");

View File

@ -14,11 +14,12 @@
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -62,9 +63,35 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_lo = arguments[0].column.get();
const auto * col_hi = arguments[1].column.get();
const auto * col_point = arguments[2].column.get();
const auto * col_lo = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_lo)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_low = col_lo->getData();
const auto * col_hi = checkAndGetColumn<ColumnUInt64>(arguments[1].column.get());
if (!col_hi)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[1].type->getName(),
2,
getName());
const auto & data_hi = col_hi->getData();
const auto * col_point = checkAndGetColumn<ColumnUInt64>(arguments[2].column.get());
if (!col_point)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[2].type->getName(),
3,
getName());
const auto & data_point = col_point->getData();
auto dst = ColumnVector<UInt8>::create();
auto & dst_data = dst->getData();
@ -72,9 +99,9 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const auto lo = S2CellId(col_lo->getUInt(row));
const auto hi = S2CellId(col_hi->getUInt(row));
const auto point = S2CellId(col_point->getUInt(row));
const auto lo = S2CellId(data_low[row]);
const auto hi = S2CellId(data_hi[row]);
const auto point = S2CellId(data_point[row]);
if (!lo.is_valid() || !hi.is_valid())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Rectangle is not valid");

View File

@ -21,6 +21,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -67,10 +68,45 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_lo1 = arguments[0].column.get();
const auto * col_hi1 = arguments[1].column.get();
const auto * col_lo2 = arguments[2].column.get();
const auto * col_hi2 = arguments[3].column.get();
const auto * col_lo1 = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_lo1)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_lo1 = col_lo1->getData();
const auto * col_hi1 = checkAndGetColumn<ColumnFloat64>(arguments[1].column.get());
if (!col_hi1)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[1].type->getName(),
2,
getName());
const auto & data_hi1 = col_hi1->getData();
const auto * col_lo2 = checkAndGetColumn<ColumnUInt64>(arguments[2].column.get());
if (!col_lo2)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[2].type->getName(),
3,
getName());
const auto & data_lo2 = col_lo2->getData();
const auto * col_hi2 = checkAndGetColumn<ColumnFloat64>(arguments[3].column.get());
if (!col_hi2)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[3].type->getName(),
4,
getName());
const auto & data_hi2 = col_hi2->getData();
auto col_res_first = ColumnUInt64::create();
auto col_res_second = ColumnUInt64::create();
@ -83,10 +119,10 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const auto lo1 = S2CellId(col_lo1->getUInt(row));
const auto hi1 = S2CellId(col_hi1->getUInt(row));
const auto lo2 = S2CellId(col_lo2->getUInt(row));
const auto hi2 = S2CellId(col_hi2->getUInt(row));
const auto lo1 = S2CellId(data_lo1[row]);
const auto hi1 = S2CellId(data_hi1[row]);
const auto lo2 = S2CellId(data_lo2[row]);
const auto hi2 = S2CellId(data_hi2[row]);
if (!lo1.is_valid() || !hi1.is_valid())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "First rectangle is not valid");

View File

@ -19,6 +19,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -65,10 +66,45 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_lo1 = arguments[0].column.get();
const auto * col_hi1 = arguments[1].column.get();
const auto * col_lo2 = arguments[2].column.get();
const auto * col_hi2 = arguments[3].column.get();
const auto * col_lo1 = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_lo1)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_lo1 = col_lo1->getData();
const auto * col_hi1 = checkAndGetColumn<ColumnFloat64>(arguments[1].column.get());
if (!col_hi1)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[1].type->getName(),
2,
getName());
const auto & data_hi1 = col_hi1->getData();
const auto * col_lo2 = checkAndGetColumn<ColumnUInt64>(arguments[2].column.get());
if (!col_lo2)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[2].type->getName(),
3,
getName());
const auto & data_lo2 = col_lo2->getData();
const auto * col_hi2 = checkAndGetColumn<ColumnFloat64>(arguments[3].column.get());
if (!col_hi2)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[3].type->getName(),
4,
getName());
const auto & data_hi2 = col_hi2->getData();
auto col_res_first = ColumnUInt64::create();
auto col_res_second = ColumnUInt64::create();
@ -81,10 +117,10 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const auto lo1 = S2CellId(col_lo1->getUInt(row));
const auto hi1 = S2CellId(col_hi1->getUInt(row));
const auto lo2 = S2CellId(col_lo2->getUInt(row));
const auto hi2 = S2CellId(col_hi2->getUInt(row));
const auto lo1 = S2CellId(data_lo1[row]);
const auto hi1 = S2CellId(data_hi1[row]);
const auto lo2 = S2CellId(data_lo2[row]);
const auto hi2 = S2CellId(data_hi2[row]);
if (!lo1.is_valid() || !hi1.is_valid())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "First rectangle is not valid");

View File

@ -21,6 +21,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -57,7 +58,7 @@ public:
if (!WhichDataType(arg).isUInt64())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument {} of function {}. Must be Float64",
"Illegal type {} of argument {} of function {}. Must be UInt64",
arg->getName(), 1, getName());
DataTypePtr element = std::make_shared<DataTypeFloat64>();
@ -67,7 +68,16 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_id = arguments[0].column.get();
const auto * col_id = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_id)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data_id = col_id->getData();
auto col_longitude = ColumnFloat64::create();
auto col_latitude = ColumnFloat64::create();
@ -80,7 +90,7 @@ public:
for (size_t row = 0; row < input_rows_count; ++row)
{
const auto id = S2CellId(col_id->getUInt(row));
const auto id = S2CellId(data_id[row]);
if (!id.is_valid())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Point is not valid");

View File

@ -0,0 +1,60 @@
#pragma once
#include <base/types.h>
#include <memory>
namespace DB
{
class ReadBuffer;
class ReadBufferFromFileBase;
class SeekableReadBuffer;
/// Interface for reading an archive.
/// Interface for reading an archive (e.g. a zip file).
/// Implementations provide random access by file name as well as sequential
/// enumeration of the archive's contents.
class IArchiveReader : public std::enable_shared_from_this<IArchiveReader>
{
public:
    virtual ~IArchiveReader() = default;

    /// Returns true if there is a specified file in the archive.
    virtual bool fileExists(const String & filename) = 0;

    /// Metadata of a single file stored inside the archive.
    struct FileInfo
    {
        UInt64 uncompressed_size;   /// Size of the file after decompression.
        UInt64 compressed_size;     /// Size of the file as stored in the archive.
        int compression_method;     /// Implementation-specific compression method code.
        bool is_encrypted;          /// Whether the stored data is encrypted.
    };

    /// Returns the information about a file stored in the archive.
    virtual FileInfo getFileInfo(const String & filename) = 0;

    /// Iterator over the files contained in the archive.
    class FileEnumerator
    {
    public:
        virtual ~FileEnumerator() = default;
        virtual const String & getFileName() const = 0;
        virtual const FileInfo & getFileInfo() const = 0;
        /// Advances to the next file; returns false when there are no more files.
        virtual bool nextFile() = 0;
    };

    /// Starts enumerating files in the archive.
    virtual std::unique_ptr<FileEnumerator> firstFile() = 0;

    /// Starts reading a file from the archive. The function returns a read buffer,
    /// you can read that buffer to extract uncompressed data from the archive.
    /// Several read buffers can be used at the same time in parallel.
    virtual std::unique_ptr<ReadBufferFromFileBase> readFile(const String & filename) = 0;

    /// It's possible to convert a file enumerator to a read buffer and vice versa.
    virtual std::unique_ptr<ReadBufferFromFileBase> readFile(std::unique_ptr<FileEnumerator> enumerator) = 0;
    virtual std::unique_ptr<FileEnumerator> nextFile(std::unique_ptr<ReadBuffer> read_buffer) = 0;

    /// Sets password used to decrypt files in the archive.
    /// Default implementation ignores the password (for archive formats without encryption).
    virtual void setPassword(const String & /* password */) {}

    /// Factory of the underlying seekable stream, allowing an implementation to
    /// open multiple independent streams over the same archive.
    using ReadArchiveFunction = std::function<std::unique_ptr<SeekableReadBuffer>()>;
};
}

View File

@ -0,0 +1,38 @@
#pragma once
#include <base/types.h>
#include <memory>
namespace DB
{
class WriteBufferFromFileBase;
/// Interface for writing an archive.
/// Interface for writing an archive (e.g. a zip file).
class IArchiveWriter : public std::enable_shared_from_this<IArchiveWriter>
{
public:
    /// The destructor finalizes writing the archive.
    virtual ~IArchiveWriter() = default;

    /// Starts writing a file to the archive. The function returns a write buffer,
    /// any data written to that buffer will be compressed and then put to the archive.
    /// You can keep only one such buffer at a time, a buffer returned by previous call
    /// of the function `writeFile()` should be destroyed before next call of `writeFile()`.
    virtual std::unique_ptr<WriteBufferFromFileBase> writeFile(const String & filename) = 0;

    /// Returns true if there is an active instance of WriteBuffer returned by writeFile().
    /// This function should be used mostly for debugging purposes.
    virtual bool isWritingFile() const = 0;

    /// Sentinel meaning "use the format's default compression level".
    static constexpr const int kDefaultCompressionLevel = -1;

    /// Sets compression method and level.
    /// Changing them will affect next file in the archive.
    /// Default implementation is a no-op (for formats without configurable compression).
    virtual void setCompression(int /* compression_method */, int /* compression_level */ = kDefaultCompressionLevel) {}

    /// Sets password. If the password is not empty it will enable encryption in the archive.
    /// Default implementation ignores the password (for formats without encryption).
    virtual void setPassword(const String & /* password */) {}
};
}

View File

@ -0,0 +1,563 @@
#include <IO/Archives/ZipArchiveReader.h>
#if USE_MINIZIP
#include <IO/ReadBufferFromFileBase.h>
#include <Common/quoteString.h>
#include <unzip.h>
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_UNPACK_ARCHIVE;
extern const int LOGICAL_ERROR;
extern const int SEEK_POSITION_OUT_OF_BOUND;
}
using RawHandle = unzFile;
/// Holds a raw handle, calls acquireRawHandle() in the constructor and releaseRawHandle() in the destructor.
class ZipArchiveReader::HandleHolder
{
public:
HandleHolder() = default;
explicit HandleHolder(const std::shared_ptr<ZipArchiveReader> & reader_) : reader(reader_), raw_handle(reader->acquireRawHandle()) { }
~HandleHolder()
{
if (raw_handle)
{
try
{
closeFile();
}
catch (...)
{
tryLogCurrentException("ZipArchiveReader");
}
reader->releaseRawHandle(raw_handle);
}
}
HandleHolder(HandleHolder && src)
{
*this = std::move(src);
}
HandleHolder & operator =(HandleHolder && src)
{
reader = std::exchange(src.reader, nullptr);
raw_handle = std::exchange(src.raw_handle, nullptr);
file_name = std::exchange(src.file_name, {});
file_info = std::exchange(src.file_info, {});
return *this;
}
RawHandle getRawHandle() const { return raw_handle; }
std::shared_ptr<ZipArchiveReader> getReader() const { return reader; }
void locateFile(const String & file_name_)
{
resetFileInfo();
bool case_sensitive = true;
int err = unzLocateFile(raw_handle, file_name_.c_str(), reinterpret_cast<unzFileNameComparer>(static_cast<size_t>(case_sensitive)));
if (err == UNZ_END_OF_LIST_OF_FILE)
showError("File " + quoteString(file_name_) + " not found");
file_name = file_name_;
}
bool tryLocateFile(const String & file_name_)
{
resetFileInfo();
bool case_sensitive = true;
int err = unzLocateFile(raw_handle, file_name_.c_str(), reinterpret_cast<unzFileNameComparer>(static_cast<size_t>(case_sensitive)));
if (err == UNZ_END_OF_LIST_OF_FILE)
return false;
checkResult(err);
file_name = file_name_;
return true;
}
bool firstFile()
{
resetFileInfo();
int err = unzGoToFirstFile(raw_handle);
if (err == UNZ_END_OF_LIST_OF_FILE)
return false;
checkResult(err);
return true;
}
bool nextFile()
{
resetFileInfo();
int err = unzGoToNextFile(raw_handle);
if (err == UNZ_END_OF_LIST_OF_FILE)
return false;
checkResult(err);
return true;
}
const String & getFileName() const
{
if (!file_name)
retrieveFileInfo();
return *file_name;
}
const FileInfo & getFileInfo() const
{
if (!file_info)
retrieveFileInfo();
return *file_info;
}
void closeFile()
{
int err = unzCloseCurrentFile(raw_handle);
/// If err == UNZ_PARAMERROR the file is already closed.
if (err != UNZ_PARAMERROR)
checkResult(err);
}
void checkResult(int code) const { reader->checkResult(code); }
[[noreturn]] void showError(const String & message) const { reader->showError(message); }
private:
/// Lazily fetches the name and metadata of the current entry from the archive.
/// Called from const getters, hence `const` here and `mutable` on the caches.
void retrieveFileInfo() const
{
if (file_name && file_info)
return;
unz_file_info64 finfo;
/// First call: metadata only (no name buffer yet — its length is unknown).
int err = unzGetCurrentFileInfo64(raw_handle, &finfo, nullptr, 0, nullptr, 0, nullptr, 0);
if (err == UNZ_PARAMERROR)
showError("No current file");
checkResult(err);
if (!file_info)
{
file_info.emplace();
file_info->uncompressed_size = finfo.uncompressed_size;
file_info->compressed_size = finfo.compressed_size;
file_info->compression_method = finfo.compression_method;
file_info->is_encrypted = (finfo.flag & MZ_ZIP_FLAG_ENCRYPTED);
}
if (!file_name)
{
/// Second call: retrieve only the name, now that its exact length is known.
file_name.emplace();
file_name->resize(finfo.size_filename);
checkResult(unzGetCurrentFileInfo64(raw_handle, nullptr, file_name->data(), finfo.size_filename, nullptr, 0, nullptr, 0));
}
}
/// Forgets the cached entry info; called whenever the handle is repositioned.
void resetFileInfo()
{
file_info.reset();
file_name.reset();
}
std::shared_ptr<ZipArchiveReader> reader;
RawHandle raw_handle = nullptr;
/// Lazily-filled caches for the current entry (see retrieveFileInfo()).
mutable std::optional<String> file_name;
mutable std::optional<FileInfo> file_info;
};
/// This class represents a ReadBuffer actually returned by readFile().
class ZipArchiveReader::ReadBufferFromZipArchive : public ReadBufferFromFileBase
{
public:
    /// Opens the current entry of `handle_` for reading, decrypting it if necessary.
    /// Throws if the entry is encrypted and no password was set on the reader.
    explicit ReadBufferFromZipArchive(HandleHolder && handle_)
        : ReadBufferFromFileBase(DBMS_DEFAULT_BUFFER_SIZE, nullptr, 0)
        , handle(std::move(handle_))
    {
        const auto & file_info = handle.getFileInfo();
        checkCompressionMethodIsEnabled(static_cast<CompressionMethod>(file_info.compression_method));

        /// Encrypted entries require the password previously set via setPassword().
        const char * password_cstr = nullptr;
        if (file_info.is_encrypted)
        {
            const auto & password_str = handle.getReader()->password;
            if (password_str.empty())
                showError("Password is required");
            password_cstr = password_str.c_str();
            checkEncryptionIsEnabled();
        }

        RawHandle raw_handle = handle.getRawHandle();
        int err = unzOpenCurrentFilePassword(raw_handle, password_cstr);
        if (err == MZ_PASSWORD_ERROR)
            showError("Wrong password");
        checkResult(err);
    }

    /// Seeks inside the current entry. Only SEEK_SET and SEEK_CUR are allowed.
    off_t seek(off_t off, int whence) override
    {
        off_t current_pos = getPosition();
        off_t new_pos;
        if (whence == SEEK_SET)
            new_pos = off;
        else if (whence == SEEK_CUR)
            new_pos = off + current_pos;
        else
            throw Exception("Only SEEK_SET and SEEK_CUR seek modes allowed.", ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);

        if (new_pos == current_pos)
            return current_pos; /// The position is the same.

        if (new_pos < 0)
            throw Exception("Seek position is out of bound", ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);

        off_t working_buffer_start_pos = current_pos - offset();
        off_t working_buffer_end_pos = current_pos + available();

        if ((working_buffer_start_pos <= new_pos) && (new_pos <= working_buffer_end_pos))
        {
            /// The new position is still inside the buffer.
            position() += new_pos - current_pos;
            return new_pos;
        }

        RawHandle raw_handle = handle.getRawHandle();

        /// Check that the new position is not beyond the end of the file.
        const auto & file_info = handle.getFileInfo();
        if (new_pos > static_cast<off_t>(file_info.uncompressed_size))
            throw Exception("Seek position is out of bound", ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);

        if (file_info.compression_method == static_cast<int>(CompressionMethod::kStore))
        {
            /// unzSeek64() works only for non-compressed files.
            ///
            /// Fix: seek to the absolute `new_pos` instead of forwarding (off, whence).
            /// `current_pos` is the *buffered* position — the raw handle is ahead of it
            /// by available() bytes — so a raw SEEK_CUR by `off` would land in the wrong
            /// place. Also drop the buffered data (it belongs to the old position),
            /// otherwise subsequent reads would return stale bytes.
            checkResult(unzSeek64(raw_handle, new_pos, SEEK_SET));
            position() = working_buffer.end();
            return new_pos;
        }

        /// As a last try we go slow way, we're going to simply ignore all data before the new position.
        if (new_pos < current_pos)
        {
            /// Rewinding is only possible by reopening the entry from its beginning.
            checkResult(unzCloseCurrentFile(raw_handle));
            checkResult(unzOpenCurrentFile(raw_handle));
            current_pos = 0;
            /// Drop the buffered data so ignore() doesn't count stale bytes
            /// (left over from the old position) as if read from the start.
            position() = working_buffer.end();
        }

        ignore(new_pos - current_pos);
        return new_pos;
    }

    /// The logical position: the raw handle's position minus bytes still buffered.
    off_t getPosition() override
    {
        RawHandle raw_handle = handle.getRawHandle();
        return unzTell64(raw_handle) - available();
    }

    String getFileName() const override { return handle.getFileName(); }

    /// Releases owned handle to pass it to an enumerator.
    HandleHolder releaseHandle() &&
    {
        handle.closeFile();
        return std::move(handle);
    }

private:
    /// Reads the next chunk of uncompressed data into the internal buffer.
    bool nextImpl() override
    {
        RawHandle raw_handle = handle.getRawHandle();
        auto bytes_read = unzReadCurrentFile(raw_handle, internal_buffer.begin(), internal_buffer.size());

        if (bytes_read < 0)
            checkResult(bytes_read);

        if (!bytes_read)
            return false; /// End of the entry.

        working_buffer = internal_buffer;
        working_buffer.resize(bytes_read);
        return true;
    }

    void checkResult(int code) const { handle.checkResult(code); }
    [[noreturn]] void showError(const String & message) const { handle.showError(message); }

    HandleHolder handle;
};
/// Implementation of FileEnumerator: a thin wrapper forwarding to a HandleHolder.
class ZipArchiveReader::FileEnumeratorImpl : public FileEnumerator
{
public:
explicit FileEnumeratorImpl(HandleHolder && handle_) : handle(std::move(handle_)) {}
const String & getFileName() const override { return handle.getFileName(); }
const FileInfo & getFileInfo() const override { return handle.getFileInfo(); }
bool nextFile() override { return handle.nextFile(); }
/// Releases owned handle to pass it to a read buffer.
HandleHolder releaseHandle() && { return std::move(handle); }
private:
HandleHolder handle;
};
namespace
{
/// Provides a set of functions allowing the minizip library to read its input
/// from a SeekableReadBuffer instead of an ordinary file in the local filesystem.
class StreamFromReadBuffer
{
public:
/// Builds the minizip I/O callback table and opens an unzip handle over it.
/// `opaque` is a stack local consumed by openFileFunc() during the unzOpen2_64()
/// call itself; the other callbacks ignore their opaque argument.
/// NOTE(review): minizip keeps the (then dangling) opaque pointer afterwards —
/// this is only safe while every other callback keeps ignoring it; confirm.
static RawHandle open(std::unique_ptr<SeekableReadBuffer> archive_read_buffer, UInt64 archive_size)
{
StreamFromReadBuffer::Opaque opaque{std::move(archive_read_buffer), archive_size};
zlib_filefunc64_def func_def;
func_def.zopen64_file = &StreamFromReadBuffer::openFileFunc;
func_def.zclose_file = &StreamFromReadBuffer::closeFileFunc;
func_def.zread_file = &StreamFromReadBuffer::readFileFunc;
func_def.zwrite_file = &StreamFromReadBuffer::writeFileFunc;
func_def.zseek64_file = &StreamFromReadBuffer::seekFunc;
func_def.ztell64_file = &StreamFromReadBuffer::tellFunc;
func_def.zerror_file = &StreamFromReadBuffer::testErrorFunc;
func_def.opaque = &opaque;
return unzOpen2_64(/* path= */ nullptr,
&func_def);
}
private:
std::unique_ptr<SeekableReadBuffer> read_buffer;
UInt64 start_offset = 0;
UInt64 total_size = 0;
/// Set when minizip does SEEK_END; our read buffers can't do that, so we fake it.
bool at_end = false;
/// Parameters passed from open() to openFileFunc() through minizip's opaque pointer.
struct Opaque
{
std::unique_ptr<SeekableReadBuffer> read_buffer;
UInt64 total_size = 0;
};
/// Called by minizip during unzOpen2_64(); allocates the stream object.
/// Deleted later in closeFileFunc().
static void * openFileFunc(void * opaque, const void *, int)
{
auto & opq = *reinterpret_cast<Opaque *>(opaque);
return new StreamFromReadBuffer(std::move(opq.read_buffer), opq.total_size);
}
StreamFromReadBuffer(std::unique_ptr<SeekableReadBuffer> read_buffer_, UInt64 total_size_)
: read_buffer(std::move(read_buffer_)), start_offset(read_buffer->getPosition()), total_size(total_size_) {}
static int closeFileFunc(void *, void * stream)
{
delete reinterpret_cast<StreamFromReadBuffer *>(stream);
return ZIP_OK;
}
static StreamFromReadBuffer & get(void * ptr)
{
return *reinterpret_cast<StreamFromReadBuffer *>(ptr);
}
static int testErrorFunc(void *, void *)
{
return ZIP_OK;
}
static unsigned long readFileFunc(void *, void * stream, void * buf, unsigned long size) // NOLINT(google-runtime-int)
{
auto & strm = get(stream);
if (strm.at_end)
return 0;
auto read_bytes = strm.read_buffer->read(reinterpret_cast<char *>(buf), size);
return read_bytes;
}
static ZPOS64_T tellFunc(void *, void * stream)
{
auto & strm = get(stream);
if (strm.at_end)
return strm.total_size;
/// Positions reported to minizip are relative to where the archive starts
/// inside the read buffer.
auto pos = strm.read_buffer->getPosition() - strm.start_offset;
return pos;
}
static long seekFunc(void *, void * stream, ZPOS64_T offset, int origin) // NOLINT(google-runtime-int)
{
auto & strm = get(stream);
if (origin == SEEK_END)
{
/// Our implementations of SeekableReadBuffer don't support SEEK_END,
/// but the minizip library needs it, so we have to simulate it here.
strm.at_end = true;
return ZIP_OK;
}
strm.at_end = false;
if (origin == SEEK_SET)
offset += strm.start_offset;
/// NOTE(review): assumes read_buffer->seek() accepts this `origin`
/// (SEEK_SET / SEEK_CUR) — confirm against SeekableReadBuffer implementations.
strm.read_buffer->seek(offset, origin);
return ZIP_OK;
}
/// Reading streams are never written to; minizip must not call this.
static unsigned long writeFileFunc(void *, void *, const void *, unsigned long) // NOLINT(google-runtime-int)
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "StreamFromReadBuffer::writeFile must not be called");
}
};
}
/// Reads the archive from a file in the local filesystem.
ZipArchiveReader::ZipArchiveReader(const String & path_to_archive_)
: path_to_archive(path_to_archive_)
{
init();
}
/// Reads the archive through read buffers produced by `archive_read_function_`;
/// `path_to_archive_` is then used only for error messages.
ZipArchiveReader::ZipArchiveReader(
const String & path_to_archive_, const ReadArchiveFunction & archive_read_function_, UInt64 archive_size_)
: path_to_archive(path_to_archive_), archive_read_function(archive_read_function_), archive_size(archive_size_)
{
init();
}
void ZipArchiveReader::init()
{
/// Prepare the first handle in `free_handles` and check that the archive can be read.
/// acquireRawHandle() throws CANNOT_UNPACK_ARCHIVE if the archive can't be opened.
releaseRawHandle(acquireRawHandle());
}
/// Closes the pooled handles. Handles currently held by read buffers or
/// enumerators keep the reader alive via shared_ptr, so by the time the
/// destructor runs all handles are back in `free_handles`.
ZipArchiveReader::~ZipArchiveReader()
{
/// Close all `free_handles`.
for (RawHandle free_handle : free_handles)
{
try
{
checkResult(unzClose(free_handle));
}
catch (...)
{
/// Never throw from a destructor; just log.
tryLogCurrentException("ZipArchiveReader");
}
}
}
/// Returns true if the archive contains an entry with the specified name.
bool ZipArchiveReader::fileExists(const String & filename)
{
return acquireHandle().tryLocateFile(filename);
}
/// Returns the metadata of the specified entry; throws if it's absent.
ZipArchiveReader::FileInfo ZipArchiveReader::getFileInfo(const String & filename)
{
auto handle = acquireHandle();
handle.locateFile(filename);
return handle.getFileInfo();
}
/// Starts enumerating the archive; returns nullptr if the archive is empty.
std::unique_ptr<ZipArchiveReader::FileEnumerator> ZipArchiveReader::firstFile()
{
auto handle = acquireHandle();
if (!handle.firstFile())
return nullptr;
return std::make_unique<FileEnumeratorImpl>(std::move(handle));
}
/// Opens the specified entry for reading; throws if it's absent.
std::unique_ptr<ReadBufferFromFileBase> ZipArchiveReader::readFile(const String & filename)
{
auto handle = acquireHandle();
handle.locateFile(filename);
return std::make_unique<ReadBufferFromZipArchive>(std::move(handle));
}
/// Converts an enumerator into a read buffer over its current entry,
/// reusing the underlying handle.
std::unique_ptr<ReadBufferFromFileBase> ZipArchiveReader::readFile(std::unique_ptr<FileEnumerator> enumerator)
{
if (!dynamic_cast<FileEnumeratorImpl *>(enumerator.get()))
throw Exception("Wrong enumerator passed to readFile()", ErrorCodes::LOGICAL_ERROR);
auto enumerator_impl = std::unique_ptr<FileEnumeratorImpl>(static_cast<FileEnumeratorImpl *>(enumerator.release()));
auto handle = std::move(*enumerator_impl).releaseHandle();
return std::make_unique<ReadBufferFromZipArchive>(std::move(handle));
}
/// The reverse conversion: turns a read buffer back into an enumerator
/// positioned at the following entry; returns nullptr at the end of the list.
std::unique_ptr<ZipArchiveReader::FileEnumerator> ZipArchiveReader::nextFile(std::unique_ptr<ReadBuffer> read_buffer)
{
if (!dynamic_cast<ReadBufferFromZipArchive *>(read_buffer.get()))
throw Exception("Wrong ReadBuffer passed to nextFile()", ErrorCodes::LOGICAL_ERROR);
auto read_buffer_from_zip = std::unique_ptr<ReadBufferFromZipArchive>(static_cast<ReadBufferFromZipArchive *>(read_buffer.release()));
auto handle = std::move(*read_buffer_from_zip).releaseHandle();
if (!handle.nextFile())
return nullptr;
return std::make_unique<FileEnumeratorImpl>(std::move(handle));
}
/// Sets the password used to decrypt encrypted entries;
/// affects read buffers created after this call.
void ZipArchiveReader::setPassword(const String & password_)
{
std::lock_guard lock{mutex};
password = password_;
}
/// Wraps a raw handle into a RAII holder that keeps the reader alive.
ZipArchiveReader::HandleHolder ZipArchiveReader::acquireHandle()
{
return HandleHolder{std::static_pointer_cast<ZipArchiveReader>(shared_from_this())};
}
/// Returns a pooled unzip handle if one is free, otherwise opens a new one —
/// either over a read buffer (StreamFromReadBuffer) or over the file directly.
ZipArchiveReader::RawHandle ZipArchiveReader::acquireRawHandle()
{
std::lock_guard lock{mutex};
if (!free_handles.empty())
{
RawHandle free_handle = free_handles.back();
free_handles.pop_back();
return free_handle;
}
RawHandle new_handle = nullptr;
if (archive_read_function)
new_handle = StreamFromReadBuffer::open(archive_read_function(), archive_size);
else
new_handle = unzOpen64(path_to_archive.c_str());
if (!new_handle)
throw Exception(ErrorCodes::CANNOT_UNPACK_ARCHIVE, "Couldn't open zip archive {}", quoteString(path_to_archive));
return new_handle;
}
/// Returns a handle to the pool for reuse; the handles are closed in ~ZipArchiveReader().
void ZipArchiveReader::releaseRawHandle(RawHandle handle_)
{
if (!handle_)
return;
std::lock_guard lock{mutex};
free_handles.push_back(handle_);
}
/// Throws CANNOT_UNPACK_ARCHIVE (via showError()) if `code` is a minizip error.
/// Non-negative codes — UNZ_OK and positive byte counts — are not errors.
void ZipArchiveReader::checkResult(int code) const
{
    if (code >= UNZ_OK)
        return;

    /// Only negative codes reach this point, so translate the known minizip
    /// error codes into readable names. (The old `case UNZ_OK` branch was
    /// unreachable because of the early return above and has been removed.)
    String message = "Code= ";
    switch (code)
    {
        case UNZ_ERRNO: message += "ERRNO, errno= " + String{strerror(errno)}; break;
        case UNZ_PARAMERROR: message += "PARAMERROR"; break;
        case UNZ_BADZIPFILE: message += "BADZIPFILE"; break;
        case UNZ_INTERNALERROR: message += "INTERNALERROR"; break;
        case UNZ_CRCERROR: message += "CRCERROR"; break;
        case UNZ_BADPASSWORD: message += "BADPASSWORD"; break;
        default: message += std::to_string(code); break;
    }
    showError(message);
}
/// Throws CANNOT_UNPACK_ARCHIVE with the archive path included in the message.
void ZipArchiveReader::showError(const String & message) const
{
throw Exception(ErrorCodes::CANNOT_UNPACK_ARCHIVE, "Couldn't unpack zip archive {}: {}", quoteString(path_to_archive), message);
}
}
#endif

View File

@ -0,0 +1,86 @@
#pragma once
#include <Common/config.h>
#if USE_MINIZIP
#include <IO/Archives/IArchiveReader.h>
#include <IO/Archives/ZipArchiveWriter.h>
#include <base/shared_ptr_helper.h>
#include <mutex>
#include <vector>
namespace DB
{
class ReadBuffer;
class ReadBufferFromFileBase;
class SeekableReadBuffer;
/// Implementation of IArchiveReader for reading zip archives.
/// Thread-safe: keeps a pool of unzip handles so that several read buffers
/// can be used in parallel (see free_handles / acquireRawHandle in the .cpp).
class ZipArchiveReader : public shared_ptr_helper<ZipArchiveReader>, public IArchiveReader
{
public:
using CompressionMethod = ZipArchiveWriter::CompressionMethod;
~ZipArchiveReader() override;
/// Returns true if there is a specified file in the archive.
bool fileExists(const String & filename) override;
/// Returns the information about a file stored in the archive.
FileInfo getFileInfo(const String & filename) override;
/// Starts enumerating files in the archive.
std::unique_ptr<FileEnumerator> firstFile() override;
/// Starts reading a file from the archive. The function returns a read buffer,
/// you can read that buffer to extract uncompressed data from the archive.
/// Several read buffers can be used at the same time in parallel.
std::unique_ptr<ReadBufferFromFileBase> readFile(const String & filename) override;
/// It's possible to convert a file enumerator to a read buffer and vice versa.
std::unique_ptr<ReadBufferFromFileBase> readFile(std::unique_ptr<FileEnumerator> enumerator) override;
std::unique_ptr<FileEnumerator> nextFile(std::unique_ptr<ReadBuffer> read_buffer) override;
/// Sets password used to decrypt the contents of the files in the archive.
void setPassword(const String & password_) override;
/// Utility functions: forwarded to ZipArchiveWriter so both classes agree
/// on compression-method names and build-time feature checks.
static CompressionMethod parseCompressionMethod(const String & str) { return ZipArchiveWriter::parseCompressionMethod(str); }
static void checkCompressionMethodIsEnabled(CompressionMethod method) { ZipArchiveWriter::checkCompressionMethodIsEnabled(method); }
static void checkEncryptionIsEnabled() { ZipArchiveWriter::checkEncryptionIsEnabled(); }
private:
/// Constructs an archive's reader that will read from a file in the local filesystem.
explicit ZipArchiveReader(const String & path_to_archive_);
/// Constructs an archive's reader that will read by making a read buffer by using
/// a specified function.
ZipArchiveReader(const String & path_to_archive_, const ReadArchiveFunction & archive_read_function_, UInt64 archive_size_);
friend struct shared_ptr_helper<ZipArchiveReader>;
class ReadBufferFromZipArchive;
class FileEnumeratorImpl;
class HandleHolder;
/// Opaque minizip unzip handle.
using RawHandle = void *;
void init();
HandleHolder acquireHandle();
RawHandle acquireRawHandle();
void releaseRawHandle(RawHandle handle_);
void checkResult(int code) const;
[[noreturn]] void showError(const String & message) const;
const String path_to_archive;
const ReadArchiveFunction archive_read_function;
const UInt64 archive_size = 0;
String password;
/// Pool of open unzip handles reused between calls; guarded by `mutex`.
std::vector<RawHandle> free_handles;
mutable std::mutex mutex;
};
}
#endif

View File

@ -0,0 +1,385 @@
#include <IO/Archives/ZipArchiveWriter.h>
#if USE_MINIZIP
#include <IO/WriteBufferFromFileBase.h>
#include <Common/quoteString.h>
#include <zip.h>
#include <boost/algorithm/string/predicate.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_PACK_ARCHIVE;
extern const int SUPPORT_IS_DISABLED;
extern const int LOGICAL_ERROR;
}
using RawHandle = zipFile;
/// Holds a raw handle, calls acquireRawHandle() in the constructor and releaseRawHandle() in the destructor.
class ZipArchiveWriter::HandleHolder
{
public:
HandleHolder() = default;
explicit HandleHolder(const std::shared_ptr<ZipArchiveWriter> & writer_) : writer(writer_), raw_handle(writer->acquireRawHandle()) { }
~HandleHolder()
{
if (raw_handle)
{
try
{
/// Make sure the current entry is closed before returning the handle.
int err = zipCloseFileInZip(raw_handle);
/// If err == ZIP_PARAMERROR the file is already closed.
if (err != ZIP_PARAMERROR)
checkResult(err);
}
catch (...)
{
/// Never throw from a destructor; just log.
tryLogCurrentException("ZipArchiveWriter");
}
writer->releaseRawHandle(raw_handle);
}
}
HandleHolder(HandleHolder && src)
{
*this = std::move(src);
}
/// Move-assignment: steals the writer and the raw handle, leaving `src` empty.
HandleHolder & operator =(HandleHolder && src)
{
writer = std::exchange(src.writer, nullptr);
raw_handle = std::exchange(src.raw_handle, nullptr);
return *this;
}
RawHandle getRawHandle() const { return raw_handle; }
std::shared_ptr<ZipArchiveWriter> getWriter() const { return writer; }
/// Error reporting is delegated to the owning writer (for the archive path).
void checkResult(int code) const { writer->checkResult(code); }
private:
std::shared_ptr<ZipArchiveWriter> writer;
RawHandle raw_handle = nullptr;
};
/// This class represents a WriteBuffer actually returned by writeFile().
class ZipArchiveWriter::WriteBufferFromZipArchive : public WriteBufferFromFileBase
{
public:
/// Opens a new entry `filename_` in the archive with the writer's current
/// compression settings and (optionally) password-based encryption.
WriteBufferFromZipArchive(HandleHolder && handle_, const String & filename_)
: WriteBufferFromFileBase(DBMS_DEFAULT_BUFFER_SIZE, nullptr, 0)
, handle(std::move(handle_))
, filename(filename_)
{
auto compress_method = handle.getWriter()->compression_method;
auto compress_level = handle.getWriter()->compression_level;
checkCompressionMethodIsEnabled(static_cast<CompressionMethod>(compress_method));
/// A non-empty password enables encryption for this entry.
const char * password_cstr = nullptr;
const String & password_str = handle.getWriter()->password;
if (!password_str.empty())
{
checkEncryptionIsEnabled();
password_cstr = password_str.c_str();
}
RawHandle raw_handle = handle.getRawHandle();
checkResult(zipOpenNewFileInZip3_64(
raw_handle,
filename_.c_str(),
/* zipfi= */ nullptr,
/* extrafield_local= */ nullptr,
/* size_extrafield_local= */ 0,
/* extrafield_global= */ nullptr,
/* size_extrafield_global= */ 0,
/* comment= */ nullptr,
compress_method,
compress_level,
/* raw= */ false,
/* windowBits= */ 0,
/* memLevel= */ 0,
/* strategy= */ 0,
password_cstr,
/* crc_for_crypting= */ 0,
/* zip64= */ true));
}
~WriteBufferFromZipArchive() override
{
try
{
/// Flush any remaining buffered data into the archive.
finalize();
}
catch (...)
{
/// Never throw from a destructor; just log.
tryLogCurrentException("ZipArchiveWriter");
}
}
void sync() override { next(); }
std::string getFileName() const override { return filename; }
private:
/// Compresses and writes the accumulated buffer into the current entry.
void nextImpl() override
{
if (!offset())
return;
RawHandle raw_handle = handle.getRawHandle();
checkResult(zipWriteInFileInZip(raw_handle, working_buffer.begin(), offset()));
}
void checkResult(int code) const { handle.checkResult(code); }
HandleHolder handle;
String filename;
};
namespace
{
/// Provides a set of functions allowing the minizip library to write its output
/// to a WriteBuffer instead of an ordinary file in the local filesystem.
class StreamFromWriteBuffer
{
public:
/// Builds the minizip I/O callback table and opens a zip handle over it.
/// `opaque` lives only for the duration of zipOpen2_64(): minizip calls
/// openFileFunc() during that call, which moves the buffer out; the other
/// callbacks ignore their opaque argument.
static RawHandle open(std::unique_ptr<WriteBuffer> archive_write_buffer)
{
Opaque opaque{std::move(archive_write_buffer)};
zlib_filefunc64_def func_def;
func_def.zopen64_file = &StreamFromWriteBuffer::openFileFunc;
func_def.zclose_file = &StreamFromWriteBuffer::closeFileFunc;
func_def.zread_file = &StreamFromWriteBuffer::readFileFunc;
func_def.zwrite_file = &StreamFromWriteBuffer::writeFileFunc;
func_def.zseek64_file = &StreamFromWriteBuffer::seekFunc;
func_def.ztell64_file = &StreamFromWriteBuffer::tellFunc;
func_def.zerror_file = &StreamFromWriteBuffer::testErrorFunc;
func_def.opaque = &opaque;
return zipOpen2_64(
/* path= */ nullptr,
/* append= */ false,
/* globalcomment= */ nullptr,
&func_def);
}
private:
std::unique_ptr<WriteBuffer> write_buffer;
UInt64 start_offset = 0;
/// Parameters passed from open() to openFileFunc() through minizip's opaque pointer.
struct Opaque
{
std::unique_ptr<WriteBuffer> write_buffer;
};
/// Called by minizip during zipOpen2_64(); allocates the stream object.
/// Deleted later in closeFileFunc().
static void * openFileFunc(void * opaque, const void *, int)
{
Opaque & opq = *reinterpret_cast<Opaque *>(opaque);
return new StreamFromWriteBuffer(std::move(opq.write_buffer));
}
explicit StreamFromWriteBuffer(std::unique_ptr<WriteBuffer> write_buffer_)
: write_buffer(std::move(write_buffer_)), start_offset(write_buffer->count()) {}
static int closeFileFunc(void *, void * stream)
{
delete reinterpret_cast<StreamFromWriteBuffer *>(stream);
return ZIP_OK;
}
static StreamFromWriteBuffer & get(void * ptr)
{
return *reinterpret_cast<StreamFromWriteBuffer *>(ptr);
}
static unsigned long writeFileFunc(void *, void * stream, const void * buf, unsigned long size) // NOLINT(google-runtime-int)
{
auto & strm = get(stream);
strm.write_buffer->write(reinterpret_cast<const char *>(buf), size);
return size;
}
static int testErrorFunc(void *, void *)
{
return ZIP_OK;
}
static ZPOS64_T tellFunc(void *, void * stream)
{
auto & strm = get(stream);
/// Positions reported to minizip are relative to where the archive starts
/// inside the write buffer.
auto pos = strm.write_buffer->count() - strm.start_offset;
return pos;
}
/// Write buffers are append-only; minizip must not try to seek one.
static long seekFunc(void *, void *, ZPOS64_T, int) // NOLINT(google-runtime-int)
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "StreamFromWriteBuffer::seek must not be called");
}
/// Writing streams are never read from; minizip must not call this.
static unsigned long readFileFunc(void *, void *, void *, unsigned long) // NOLINT(google-runtime-int)
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "StreamFromWriteBuffer::readFile must not be called");
}
};
}
/// Writes the archive as a file in the local filesystem.
ZipArchiveWriter::ZipArchiveWriter(const String & path_to_archive_)
: ZipArchiveWriter(path_to_archive_, nullptr)
{
}
/// Writes the archive into `archive_write_buffer_` if given, otherwise into
/// a file at `path_to_archive_` (which is always used for error messages).
ZipArchiveWriter::ZipArchiveWriter(const String & path_to_archive_, std::unique_ptr<WriteBuffer> archive_write_buffer_)
: path_to_archive(path_to_archive_)
{
if (archive_write_buffer_)
handle = StreamFromWriteBuffer::open(std::move(archive_write_buffer_));
else
handle = zipOpen64(path_to_archive.c_str(), /* append= */ false);
if (!handle)
throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Couldn't create zip archive {}", quoteString(path_to_archive));
}
/// Finalizes the archive (writes the central directory) by closing the handle.
ZipArchiveWriter::~ZipArchiveWriter()
{
if (handle)
{
try
{
checkResult(zipClose(handle, /* global_comment= */ nullptr));
}
catch (...)
{
/// Never throw from a destructor; just log.
tryLogCurrentException("ZipArchiveWriter");
}
}
}
/// Starts writing a new entry; only one write buffer may exist at a time
/// (acquireRawHandle() enforces this).
std::unique_ptr<WriteBufferFromFileBase> ZipArchiveWriter::writeFile(const String & filename)
{
return std::make_unique<WriteBufferFromZipArchive>(acquireHandle(), filename);
}
bool ZipArchiveWriter::isWritingFile() const
{
std::lock_guard lock{mutex};
/// `handle` is null exactly while a WriteBufferFromZipArchive holds it
/// (see acquireRawHandle() / releaseRawHandle()).
return !handle;
}
/// Sets compression method and level; affects entries opened after this call.
void ZipArchiveWriter::setCompression(int compression_method_, int compression_level_)
{
std::lock_guard lock{mutex};
compression_method = compression_method_;
compression_level = compression_level_;
}
/// Sets the encryption password; affects entries opened after this call.
void ZipArchiveWriter::setPassword(const String & password_)
{
std::lock_guard lock{mutex};
password = password_;
}
/// Parses a compression-method name (case-insensitive).
/// An empty string selects the default method, DEFLATE;
/// an unrecognized name throws CANNOT_PACK_ARCHIVE.
ZipArchiveWriter::CompressionMethod ZipArchiveWriter::parseCompressionMethod(const String & str)
{
    if (str.empty())
        return CompressionMethod::kDeflate; /// Default compression method is DEFLATE.

    /// Table of recognized method names and the corresponding enum values.
    static constexpr std::pair<const char *, CompressionMethod> known_methods[] =
    {
        {"store", CompressionMethod::kStore},
        {"deflate", CompressionMethod::kDeflate},
        {"bzip2", CompressionMethod::kBzip2},
        {"lzma", CompressionMethod::kLzma},
        {"zstd", CompressionMethod::kZstd},
        {"xz", CompressionMethod::kXz},
    };

    for (const auto & [name, method] : known_methods)
        if (boost::iequals(str, name))
            return method;

    throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Unknown compression method specified for a zip archive: {}", str);
}
/// Checks that a passed compression method can be used.
/// Throws SUPPORT_IS_DISABLED for methods compiled out of this build.
void ZipArchiveWriter::checkCompressionMethodIsEnabled(CompressionMethod method)
{
switch (method)
{
case CompressionMethod::kStore: [[fallthrough]];
case CompressionMethod::kDeflate:
case CompressionMethod::kLzma:
case CompressionMethod::kXz:
case CompressionMethod::kZstd:
return;
case CompressionMethod::kBzip2:
{
#if USE_BZIP2
return;
#else
throw Exception("BZIP2 compression method is disabled", ErrorCodes::SUPPORT_IS_DISABLED);
#endif
}
}
/// Reached only if `method` holds a value outside the enum (e.g. cast from a bad int).
throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Unknown compression method specified for a zip archive: {}", static_cast<int>(method));
}
/// Checks that encryption is enabled.
/// Zip encryption requires SSL support in this build; throws otherwise.
void ZipArchiveWriter::checkEncryptionIsEnabled()
{
#if !USE_SSL
throw Exception("Encryption in zip archive is disabled", ErrorCodes::SUPPORT_IS_DISABLED);
#endif
}
/// Wraps the raw handle into a RAII holder that keeps the writer alive.
ZipArchiveWriter::HandleHolder ZipArchiveWriter::acquireHandle()
{
return HandleHolder{std::static_pointer_cast<ZipArchiveWriter>(shared_from_this())};
}
/// Hands the single zip handle to a write buffer; `handle` stays null until
/// the buffer returns it, which is how "one write buffer at a time" is enforced.
RawHandle ZipArchiveWriter::acquireRawHandle()
{
std::lock_guard lock{mutex};
if (!handle)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot have more than one write buffer while writing a zip archive");
return std::exchange(handle, nullptr);
}
/// Returns the zip handle after a write buffer is done with it.
void ZipArchiveWriter::releaseRawHandle(RawHandle raw_handle_)
{
std::lock_guard lock{mutex};
handle = raw_handle_;
}
/// Throws CANNOT_PACK_ARCHIVE (via showError()) if `code` is a minizip error.
/// Non-negative codes are not errors.
void ZipArchiveWriter::checkResult(int code) const
{
if (code >= ZIP_OK)
return;
/// Translate the known (negative) minizip error codes into readable names.
String message = "Code= ";
switch (code)
{
case ZIP_ERRNO: message += "ERRNO, errno= " + String{strerror(errno)}; break;
case ZIP_PARAMERROR: message += "PARAMERROR"; break;
case ZIP_BADZIPFILE: message += "BADZIPFILE"; break;
case ZIP_INTERNALERROR: message += "INTERNALERROR"; break;
default: message += std::to_string(code); break;
}
showError(message);
}
/// Throws CANNOT_PACK_ARCHIVE with the archive path included in the message.
void ZipArchiveWriter::showError(const String & message) const
{
throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Couldn't pack zip archive {}: {}", quoteString(path_to_archive), message);
}
}
#endif

View File

@ -0,0 +1,97 @@
#pragma once
#include <Common/config.h>
#if USE_MINIZIP
#include <IO/Archives/IArchiveWriter.h>
#include <base/shared_ptr_helper.h>
#include <mutex>
namespace DB
{
class WriteBuffer;
class WriteBufferFromFileBase;
/// Implementation of IArchiveWriter for writing zip archives.
class ZipArchiveWriter : public shared_ptr_helper<ZipArchiveWriter>, public IArchiveWriter
{
public:
/// Destructor finalizes writing the archive.
~ZipArchiveWriter() override;
/// Starts writing a file to the archive. The function returns a write buffer,
/// any data written to that buffer will be compressed and then put to the archive.
/// You can keep only one such buffer at a time, a buffer returned by previous call
/// of the function `writeFile()` should be destroyed before next call of `writeFile()`.
std::unique_ptr<WriteBufferFromFileBase> writeFile(const String & filename) override;
/// Returns true if there is an active instance of WriteBuffer returned by writeFile().
/// This function should be used mostly for debugging purposes.
bool isWritingFile() const override;
/// Supported compression methods.
enum class CompressionMethod
{
/// Numeric values match the method identifiers in mz.h.
kStore = 0,
kDeflate = 8,
kBzip2 = 12,
kLzma = 14,
kZstd = 93,
kXz = 95,
};
/// Some compression levels.
enum class CompressionLevels
{
kDefault = kDefaultCompressionLevel,
kFast = 2,
kNormal = 6,
kBest = 9,
};
/// Sets compression method and level.
/// Changing them will affect next file in the archive.
void setCompression(int compression_method_, int compression_level_) override;
/// Sets password. Only contents of the files are encrypted,
/// names of files are not encrypted.
/// Changing the password will affect next file in the archive.
void setPassword(const String & password_) override;
/// Utility functions.
static CompressionMethod parseCompressionMethod(const String & str);
static void checkCompressionMethodIsEnabled(CompressionMethod method);
static void checkEncryptionIsEnabled();
private:
/// Constructs an archive that will be written as a file in the local filesystem.
explicit ZipArchiveWriter(const String & path_to_archive_);
/// Constructs an archive that will be written by using a specified `archive_write_buffer_`.
ZipArchiveWriter(const String & path_to_archive_, std::unique_ptr<WriteBuffer> archive_write_buffer_);
friend struct shared_ptr_helper<ZipArchiveWriter>;
class WriteBufferFromZipArchive;
class HandleHolder;
/// Opaque minizip zip handle.
using RawHandle = void *;
HandleHolder acquireHandle();
RawHandle acquireRawHandle();
void releaseRawHandle(RawHandle raw_handle_);
void checkResult(int code) const;
[[noreturn]] void showError(const String & message) const;
const String path_to_archive;
int compression_method = static_cast<int>(CompressionMethod::kDeflate);
int compression_level = kDefaultCompressionLevel;
String password;
/// Null while a write buffer owns the handle; guarded by `mutex`.
RawHandle handle = nullptr;
mutable std::mutex mutex;
};
}
#endif

Some files were not shown because too many files have changed in this diff Show More