Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-04 21:42:39 +00:00)

Commit 6b1a63792d: Merge remote-tracking branch 'origin/master' into pr-local-plan

contrib/azure (vendored submodule, 2 lines changed)
@ -1 +1 @@
Subproject commit 92c94d7f37a43cc8fc4d466884a95f610c0593bf
Subproject commit ea3e19a7be08519134c643177d56c7484dfec884
contrib/pocketfft (vendored submodule, 2 lines changed)
@ -1 +1 @@
Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
Subproject commit f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3
contrib/rocksdb (vendored submodule, 2 lines changed)
@ -1 +1 @@
Subproject commit 078fa5638690004e1f744076d1bdcc4e93767304
Subproject commit be366233921293bd07a84dc4ea6991858665f202
@ -5,20 +5,13 @@ if (NOT ENABLE_ROCKSDB)
|
||||
return()
|
||||
endif()
|
||||
|
||||
## this file is extracted from `contrib/rocksdb/CMakeLists.txt`
|
||||
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
|
||||
list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")
|
||||
|
||||
set(PORTABLE ON)
|
||||
## always disable jemalloc for rocksdb by default
|
||||
## because it introduces non-standard jemalloc APIs
|
||||
# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
|
||||
option(WITH_JEMALLOC "build with JeMalloc" OFF)
|
||||
set(USE_SNAPPY OFF)
|
||||
if (TARGET ch_contrib::snappy)
|
||||
set(USE_SNAPPY ON)
|
||||
endif()
|
||||
option(WITH_SNAPPY "build with SNAPPY" ${USE_SNAPPY})
|
||||
## lz4, zlib, zstd is enabled in ClickHouse by default
|
||||
|
||||
option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
|
||||
|
||||
# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
|
||||
option(WITH_SNAPPY "build with SNAPPY" ON)
|
||||
option(WITH_LZ4 "build with lz4" ON)
|
||||
option(WITH_ZLIB "build with zlib" ON)
|
||||
option(WITH_ZSTD "build with zstd" ON)
|
||||
@ -26,78 +19,46 @@ option(WITH_ZSTD "build with zstd" ON)
|
||||
# third-party/folly is only validated to work on Linux and Windows for now.
|
||||
# So only turn it on there by default.
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
|
||||
if(MSVC AND MSVC_VERSION LESS 1910)
|
||||
# Folly does not compile with MSVC older than VS2017
|
||||
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
|
||||
else()
|
||||
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
|
||||
endif()
|
||||
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
|
||||
else()
|
||||
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
|
||||
endif()
|
||||
|
||||
if( NOT DEFINED CMAKE_CXX_STANDARD )
|
||||
set(CMAKE_CXX_STANDARD 11)
|
||||
if(WITH_SNAPPY)
|
||||
add_definitions(-DSNAPPY)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
option(WITH_XPRESS "build with windows built in compression" OFF)
|
||||
include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc")
|
||||
else()
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
|
||||
# FreeBSD has jemalloc as default malloc
|
||||
# but it does not have all the jemalloc files in include/...
|
||||
set(WITH_JEMALLOC ON)
|
||||
else()
|
||||
if(WITH_JEMALLOC AND TARGET ch_contrib::jemalloc)
|
||||
add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_SNAPPY)
|
||||
add_definitions(-DSNAPPY)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
|
||||
endif()
|
||||
|
||||
if(WITH_ZLIB)
|
||||
add_definitions(-DZLIB)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
|
||||
endif()
|
||||
|
||||
if(WITH_LZ4)
|
||||
add_definitions(-DLZ4)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
|
||||
endif()
|
||||
|
||||
if(WITH_ZSTD)
|
||||
add_definitions(-DZSTD)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
|
||||
endif()
|
||||
if(WITH_ZLIB)
|
||||
add_definitions(-DZLIB)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
|
||||
endif()
|
||||
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
||||
if(POWER9)
|
||||
set(HAS_POWER9 1)
|
||||
set(HAS_ALTIVEC 1)
|
||||
else()
|
||||
set(HAS_POWER8 1)
|
||||
set(HAS_ALTIVEC 1)
|
||||
endif(POWER9)
|
||||
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
||||
if(WITH_LZ4)
|
||||
add_definitions(-DLZ4)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
|
||||
endif()
|
||||
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
|
||||
set(HAS_ARMV8_CRC 1)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
||||
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
|
||||
if(WITH_ZSTD)
|
||||
add_definitions(-DZSTD)
|
||||
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
|
||||
endif()
|
||||
|
||||
option(PORTABLE "build a portable binary" ON)
|
||||
|
||||
if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ)
|
||||
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
|
||||
add_definitions(-DHAVE_SSE42)
|
||||
add_definitions(-DHAVE_PCLMUL)
|
||||
endif()
|
||||
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
|
||||
set (HAS_ARMV8_CRC 1)
|
||||
# the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general
|
||||
# ARM flags, see cmake/cpu_features.cmake
|
||||
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
||||
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
||||
endif()
|
||||
|
||||
set (HAVE_THREAD_LOCAL 1)
|
||||
if(HAVE_THREAD_LOCAL)
|
||||
add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
|
||||
@ -107,8 +68,6 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
|
||||
add_definitions(-DOS_MACOSX)
|
||||
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||
add_definitions(-DOS_LINUX)
|
||||
elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
|
||||
add_definitions(-DOS_SOLARIS)
|
||||
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
|
||||
add_definitions(-DOS_FREEBSD)
|
||||
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
|
||||
@ -123,12 +82,10 @@ endif()
|
||||
|
||||
if (OS_LINUX)
|
||||
add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
|
||||
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
|
||||
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
|
||||
elseif (OS_FREEBSD)
|
||||
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
|
||||
endif()
|
||||
|
||||
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
|
||||
|
||||
include_directories(${ROCKSDB_SOURCE_DIR})
|
||||
include_directories("${ROCKSDB_SOURCE_DIR}/include")
|
||||
@ -136,11 +93,11 @@ if(WITH_FOLLY_DISTRIBUTED_MUTEX)
|
||||
include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
|
||||
endif()
|
||||
|
||||
# Main library source code
|
||||
|
||||
set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
|
||||
@ -156,6 +113,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/c.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
|
||||
@ -229,6 +187,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
|
||||
@ -247,6 +206,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memory/memory_allocator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
|
||||
@ -322,6 +282,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/unique_id.cc
|
||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
|
||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
|
||||
@ -333,9 +294,12 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_handler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
||||
@ -347,6 +311,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/regex.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
||||
@ -362,18 +327,23 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_secondary_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
|
||||
@ -393,6 +363,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
|
||||
@ -411,6 +382,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
|
||||
@ -425,7 +397,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
|
||||
rocksdb_build_version.cc)
|
||||
build_version.cc) # generated by hand
|
||||
|
||||
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
|
||||
set_source_files_properties(
|
||||
@ -462,5 +434,6 @@ endif()
|
||||
add_library(_rocksdb ${SOURCES})
|
||||
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
|
||||
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
|
||||
|
||||
# SYSTEM is required to overcome some issues
|
||||
target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")
|
||||
|
@ -309,7 +309,7 @@ function run_tests()
|
||||
try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
|
||||
|
||||
set +e
|
||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
||||
timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
||||
--no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
||||
| ts '%Y-%m-%d %H:%M:%S' \
|
||||
| tee -a test_output/test_result.txt
|
||||
|
@ -16,7 +16,7 @@ sidebar_label: clickhouse-local
|
||||
|
||||
While `clickhouse-local` is a great tool for development and testing purposes, and for processing files, it is not suitable for serving end users or applications. In these scenarios, it is recommended to use the open-source [ClickHouse](https://clickhouse.com/docs/en/install). ClickHouse is a powerful OLAP database that is designed to handle large-scale analytical workloads. It provides fast and efficient processing of complex queries on large datasets, making it ideal for use in production environments where high performance is critical. Additionally, ClickHouse offers a wide range of features such as replication, sharding, and high availability, which are essential for scaling up to handle large datasets and serving applications. If you need to handle larger datasets or serve end users or applications, we recommend using open-source ClickHouse instead of `clickhouse-local`.
|
||||
|
||||
Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local CSVs](#query-data-in-a-csv-file-using-sql) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
|
||||
Please read the docs below that show example use cases for `clickhouse-local`, such as [querying a local file](#query_data_in_file) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
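To give a flavour of such a query, here is a minimal sketch of the kind of statement `clickhouse-local` can run over a local file; the file name and column names below are hypothetical:

```sql
-- Hypothetical CSV file and columns, shown only to illustrate the workflow.
-- The file() table function reads the file and infers column types from the CSVWithNames header.
SELECT town, avg(price) AS avg_price
FROM file('house_prices.csv', 'CSVWithNames')
GROUP BY town
ORDER BY avg_price DESC
LIMIT 5;
```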
|
||||
|
||||
## Download clickhouse-local
|
||||
|
||||
|
@ -18,7 +18,7 @@ ClickHouse also supports:
|
||||
|
||||
During aggregation, all `NULL` arguments are skipped. If the aggregation has several arguments it will ignore any row in which one or more of them are NULL.
|
||||
|
||||
There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases when followed by the modifier `RESPECT NULLS`: `FIRST_VALUE(b) RESPECT NULLS`.
|
||||
There is an exception to this rule: the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases (`any` and `anyLast` respectively), when followed by the modifier `RESPECT NULLS`. For example, `FIRST_VALUE(b) RESPECT NULLS`.
|
||||
|
||||
**Examples:**
|
||||
|
||||
|
@ -5,12 +5,12 @@ sidebar_position: 102
|
||||
|
||||
# any
|
||||
|
||||
Selects the first encountered value of a column.
|
||||
Selects the first encountered value of a column, ignoring any `NULL` values.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
any(column)
|
||||
any(column) [RESPECT NULLS]
|
||||
```
|
||||
|
||||
Aliases: `any_value`, [`first_value`](../reference/first_value.md).
|
||||
@ -20,7 +20,9 @@ Aliases: `any_value`, [`first_value`](../reference/first_value.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
By default, it ignores NULL values and returns the first NOT NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md) it supports `RESPECT NULLS`, in which case it will select the first value passed, independently on whether it's NULL or not.
|
||||
:::note
|
||||
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
|
||||
:::
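As a quick sketch of the difference between the default behaviour and `RESPECT NULLS` (the table and values below are purely illustrative):

```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE = Log;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

-- The plain form skips the leading NULL and returns 'Amsterdam';
-- the RESPECT NULLS form returns NULL, the first value actually inserted.
SELECT any(city), any(city) RESPECT NULLS FROM any_nulls;
```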
|
||||
|
||||
:::note
|
||||
The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) to modify this behaviour.
|
||||
|
@ -1,44 +0,0 @@
|
||||
---
|
||||
slug: /en/sql-reference/aggregate-functions/reference/any_respect_nulls
|
||||
sidebar_position: 103
|
||||
---
|
||||
|
||||
# any_respect_nulls
|
||||
|
||||
Selects the first encountered value of a column, irregardless of whether it is a `NULL` value or not.
|
||||
|
||||
Alias: `any_value_respect_nulls`, `first_value_repect_nulls`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
any_respect_nulls(column)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
- `column`: The column name.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The last value encountered, irregardless of whether it is a `NULL` value or not.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
|
||||
|
||||
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
|
||||
|
||||
SELECT any(city), any_respect_nulls(city) FROM any_nulls;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─any(city)─┬─any_respect_nulls(city)─┐
|
||||
│ Amsterdam │ ᴺᵁᴸᴸ │
|
||||
└───────────┴─────────────────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
- [any](../reference/any.md)
|
@ -5,17 +5,21 @@ sidebar_position: 105
|
||||
|
||||
# anyLast
|
||||
|
||||
Selects the last value encountered. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
|
||||
Selects the last value encountered, ignoring any `NULL` values by default. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
anyLast(column)
|
||||
anyLast(column) [RESPECT NULLS]
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
- `column`: The column name.
|
||||
|
||||
:::note
|
||||
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the last value passed, regardless of whether it is `NULL` or not.
|
||||
:::
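As a quick sketch of the default behaviour versus `RESPECT NULLS` (the table and values below are purely illustrative):

```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE = Log;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'), (NULL), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

-- The plain form skips the trailing NULL and returns 'Valencia';
-- the RESPECT NULLS form returns NULL, the last value actually inserted.
SELECT anyLast(city), anyLast(city) RESPECT NULLS FROM any_last_nulls;
```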
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The last value encountered.
|
||||
|
@ -1,39 +0,0 @@
|
||||
---
|
||||
slug: /en/sql-reference/aggregate-functions/reference/anylast_respect_nulls
|
||||
sidebar_position: 106
|
||||
---
|
||||
|
||||
# anyLast_respect_nulls
|
||||
|
||||
Selects the last value encountered, irregardless of whether it is `NULL` or not.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
anyLast_respect_nulls(column)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
- `column`: The column name.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The last value encountered, irregardless of whether it is `NULL` or not.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
|
||||
|
||||
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
|
||||
|
||||
SELECT anyLast(city), anyLast_respect_nulls(city) FROM any_last_nulls;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─anyLast(city)─┬─anyLast_respect_nulls(city)─┐
|
||||
│ Valencia │ ᴺᵁᴸᴸ │
|
||||
└───────────────┴─────────────────────────────┘
|
||||
```
|
@ -45,10 +45,9 @@ ClickHouse-specific aggregate functions:
|
||||
|
||||
- [aggThrow](../reference/aggthrow.md)
|
||||
- [analysisOfVariance](../reference/analysis_of_variance.md)
|
||||
- [any](../reference/any_respect_nulls.md)
|
||||
- [any](../reference/any.md)
|
||||
- [anyHeavy](../reference/anyheavy.md)
|
||||
- [anyLast](../reference/anylast.md)
|
||||
- [anyLast](../reference/anylast_respect_nulls.md)
|
||||
- [boundingRatio](../reference/boundrat.md)
|
||||
- [first_value](../reference/first_value.md)
|
||||
- [last_value](../reference/last_value.md)
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
slug: /en/sql-reference/data-types/dynamic
|
||||
sidebar_position: 56
|
||||
sidebar_position: 62
|
||||
sidebar_label: Dynamic
|
||||
---
|
||||
|
||||
@ -494,13 +494,43 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O
|
||||
|
||||
As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and cast all other types to `String`.
|
||||
|
||||
## JSONExtract functions with Dynamic
|
||||
|
||||
All `JSONExtract*` functions support `Dynamic` type:
|
||||
|
||||
```sql
|
||||
SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType(dynamic) AS dynamic_type;
|
||||
```
|
||||
|
||||
```text
|
||||
┌─dynamic─┬─dynamic_type───────────┐
|
||||
│ [1,2,3] │ Array(Nullable(Int64)) │
|
||||
└─────────┴────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types
```
|
||||
|
||||
```text
|
||||
┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐
|
||||
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
|
||||
└──────────────────────────────────┴─────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types
```
|
||||
|
||||
```text
|
||||
┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐
|
||||
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
|
||||
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Binary output format
|
||||
|
||||
In [RowBinary](../../interfaces/formats.md#rowbinary-rowbinary) format values of `Dynamic` type are serialized in the following format:
|
||||
In RowBinary format values of `Dynamic` type are serialized in the following format:
|
||||
|
||||
```text
|
||||
<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
|
||||
```
|
||||
|
||||
See the [data types binary encoding specification](../../sql-reference/data-types/data-types-binary-encoding.md)
|
||||
|
@ -3080,4 +3080,4 @@ Result:
|
||||
|
||||
## Distance functions
|
||||
|
||||
All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).
|
||||
All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).
|
@ -314,10 +314,71 @@ SELECT groupBitXor(cityHash64(*)) FROM table
|
||||
Calculates a 32-bit hash code from any type of integer.
|
||||
This is a relatively fast non-cryptographic hash function of average quality for numbers.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
intHash32(int)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- 32-bit hash code. [UInt32](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT intHash32(42);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─intHash32(42)─┐
|
||||
│ 1228623923 │
|
||||
└───────────────┘
|
||||
```
|
||||
|
||||
## intHash64
|
||||
|
||||
Calculates a 64-bit hash code from any type of integer.
|
||||
It works faster than intHash32. Average quality.
|
||||
This is a relatively fast non-cryptographic hash function of average quality for numbers.
|
||||
It works faster than [intHash32](#inthash32).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
intHash64(int)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- 64-bit hash code. [UInt64](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT intHash64(42);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌────────intHash64(42)─┐
|
||||
│ 11490350930367293593 │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
## SHA1, SHA224, SHA256, SHA512, SHA512_256
|
||||
|
||||
|
docs/en/sql-reference/window-functions/dense_rank.md (new file, 73 lines)
@ -0,0 +1,73 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/dense_rank
|
||||
sidebar_label: dense_rank
|
||||
sidebar_position: 7
|
||||
---
|
||||
|
||||
# dense_rank
|
||||
|
||||
Ranks the current row within its partition without gaps. In other words, if the value of a new row is equal to the value of a previous row, it receives the same rank as that row, and the next distinct value receives the next successive rank with no gaps in ranking.
|
||||
|
||||
The [rank](./rank.md) function provides the same behaviour, but with gaps in ranking.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
dense_rank()
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A number for the current row within its partition, without gaps in ranking. [UInt64](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
The following example is based on the example provided in the instructional video [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE salaries
|
||||
(
|
||||
`team` String,
|
||||
`player` String,
|
||||
`salary` UInt32,
|
||||
`position` String
|
||||
)
|
||||
Engine = Memory;
|
||||
|
||||
INSERT INTO salaries FORMAT Values
|
||||
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
|
||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
|
||||
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
|
||||
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
|
||||
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
|
||||
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT player, salary,
|
||||
dense_rank() OVER (ORDER BY salary DESC) AS dense_rank
|
||||
FROM salaries;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─player──────────┬─salary─┬─dense_rank─┐
|
||||
1. │ Gary Chen │ 195000 │ 1 │
|
||||
2. │ Robert George │ 195000 │ 1 │
|
||||
3. │ Charles Juarez │ 190000 │ 2 │
|
||||
4. │ Michael Stanley │ 150000 │ 3 │
|
||||
5. │ Douglas Benson │ 150000 │ 3 │
|
||||
6. │ Scott Harrison │ 150000 │ 3 │
|
||||
7. │ James Henderson │ 140000 │ 4 │
|
||||
└─────────────────┴────────┴────────────┘
|
||||
```
|
docs/en/sql-reference/window-functions/first_value.md (new file, 79 lines)
@ -0,0 +1,79 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/first_value
|
||||
sidebar_label: first_value
|
||||
sidebar_position: 3
|
||||
---
|
||||
|
||||
# first_value
|
||||
|
||||
Returns the first value evaluated within its ordered frame. By default, NULL arguments are skipped; however, the `RESPECT NULLS` modifier can be used to override this behaviour.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
first_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
Alias: `any`.
|
||||
|
||||
:::note
|
||||
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
|
||||
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
|
||||
:::
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The first value evaluated within its ordered frame.
|
||||
|
||||
**Example**
|
||||
|
||||
In this example the `first_value` function is used to find the highest paid footballer from a fictional dataset of salaries of Premier League football players.
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS salaries;
|
||||
CREATE TABLE salaries
|
||||
(
|
||||
`team` String,
|
||||
`player` String,
|
||||
`salary` UInt32,
|
||||
`position` String
|
||||
)
|
||||
Engine = Memory;
|
||||
|
||||
INSERT INTO salaries FORMAT Values
|
||||
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
|
||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
|
||||
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
|
||||
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
|
||||
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
|
||||
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT player, salary,
|
||||
first_value(player) OVER (ORDER BY salary DESC) AS highest_paid_player
|
||||
FROM salaries;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─player──────────┬─salary─┬─highest_paid_player─┐
|
||||
1. │ Gary Chen │ 196000 │ Gary Chen │
|
||||
2. │ Robert George │ 195000 │ Gary Chen │
|
||||
3. │ Charles Juarez │ 190000 │ Gary Chen │
|
||||
4. │ Scott Harrison │ 180000 │ Gary Chen │
|
||||
5. │ Douglas Benson │ 150000 │ Gary Chen │
|
||||
6. │ James Henderson │ 140000 │ Gary Chen │
|
||||
7. │ Michael Stanley │ 100000 │ Gary Chen │
|
||||
└─────────────────┴────────┴─────────────────────┘
|
||||
```
|
@ -1,10 +1,11 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/
|
||||
sidebar_position: 62
|
||||
sidebar_label: Window Functions
|
||||
title: Window Functions
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# Window Functions
|
||||
|
||||
Window functions let you perform calculations across a set of rows that are related to the current row.
|
||||
Some of the calculations that you can do are similar to those that can be done with an aggregate function, but a window function doesn't cause rows to be grouped into a single output - the individual rows are still returned.
|
||||
|
||||
@ -12,8 +13,8 @@ Some of the calculations that you can do are similar to those that can be done w
|
||||
|
||||
ClickHouse supports the standard grammar for defining windows and window functions. The table below indicates whether a feature is currently supported.
|
||||
|
||||
| Feature | Supported? |
|
||||
|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Feature | Supported? |
|
||||
|--------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| ad hoc window specification (`count(*) over (partition by id order by time desc)`) | ✅ |
|
||||
| expressions involving window functions, e.g. `(count(*) over ()) / 2)` | ✅ |
|
||||
| `WINDOW` clause (`select ... from table window w as (partition by id)`) | ✅ |
|
||||
@ -75,14 +76,14 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
|
||||
|
||||
These functions can be used only as window functions.
|
||||
|
||||
- `row_number()` - Number the current row within its partition starting from 1.
|
||||
- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
|
||||
- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
|
||||
- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
|
||||
- `rank()` - Rank the current row within its partition with gaps.
|
||||
- `dense_rank()` - Rank the current row within its partition without gaps.
|
||||
- `lagInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame. The offset parameter, if not specified, defaults to 1, meaning it will fetch the value from the next row. If the calculated row exceeds the boundaries of the window frame, the specified default value is returned.
|
||||
- `leadInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame. If offset is not provided, it defaults to 1. If the offset leads to a position outside the window frame, the specified default value is used.
|
||||
- [`row_number()`](./row_number.md) - Number the current row within its partition starting from 1.
|
||||
- [`first_value(x)`](./first_value.md) - Return the first value evaluated within its ordered frame.
|
||||
- [`last_value(x)`](./last_value.md) - Return the last value evaluated within its ordered frame.
|
||||
- [`nth_value(x, offset)`](./nth_value.md) - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
|
||||
- [`rank()`](./rank.md) - Rank the current row within its partition with gaps.
|
||||
- [`dense_rank()`](./dense_rank.md) - Rank the current row within its partition without gaps.
|
||||
- [`lagInFrame(x)`](./lagInFrame.md) - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
|
||||
- [`leadInFrame(x)`](./leadInFrame.md) - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.
|
||||
|
||||
## Examples
|
||||
|
||||
|
docs/en/sql-reference/window-functions/lagInFrame.md (new file, 79 lines)
@ -0,0 +1,79 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/lagInFrame
|
||||
sidebar_label: lagInFrame
|
||||
sidebar_position: 8
|
||||
---
|
||||
|
||||
# lagInFrame
|
||||
|
||||
Returns a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
lagInFrame(x[, offset[, default]])
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Parameters**
|
||||
- `x` — Column name.
|
||||
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
|
||||
- `default` — Value to return if the calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Value evaluated at the row that is at a specified physical offset before the current row within the ordered frame.
|
||||
|
||||
**Example**
|
||||
|
||||
This example looks at historical data for a specific stock and uses the `lagInFrame` function to calculate a day-to-day delta and percentage change in the closing price of the stock.
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE stock_prices
|
||||
(
|
||||
`date` Date,
|
||||
`open` Float32, -- opening price
|
||||
`high` Float32, -- daily high
|
||||
`low` Float32, -- daily low
|
||||
`close` Float32, -- closing price
|
||||
`volume` UInt32 -- trade volume
|
||||
)
|
||||
Engine = Memory;
|
||||
|
||||
INSERT INTO stock_prices FORMAT Values
|
||||
('2024-06-03', 113.62, 115.00, 112.00, 115.00, 438392000),
|
||||
('2024-06-04', 115.72, 116.60, 114.04, 116.44, 403324000),
|
||||
('2024-06-05', 118.37, 122.45, 117.47, 122.44, 528402000),
|
||||
('2024-06-06', 124.05, 125.59, 118.32, 121.00, 664696000),
|
||||
('2024-06-07', 119.77, 121.69, 118.02, 120.89, 412386000);
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
date,
|
||||
close,
|
||||
lagInFrame(close, 1, close) OVER (ORDER BY date ASC) AS previous_day_close,
|
||||
COALESCE(ROUND(close - previous_day_close, 2)) AS delta,
|
||||
COALESCE(ROUND((delta / previous_day_close) * 100, 2)) AS percent_change
|
||||
FROM stock_prices
|
||||
ORDER BY date DESC;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌───────date─┬──close─┬─previous_day_close─┬─delta─┬─percent_change─┐
|
||||
1. │ 2024-06-07 │ 120.89 │ 121 │ -0.11 │ -0.09 │
|
||||
2. │ 2024-06-06 │ 121 │ 122.44 │ -1.44 │ -1.18 │
|
||||
3. │ 2024-06-05 │ 122.44 │ 116.44 │ 6 │ 5.15 │
|
||||
4. │ 2024-06-04 │ 116.44 │ 115 │ 1.44 │ 1.25 │
|
||||
5. │ 2024-06-03 │ 115 │ 115 │ 0 │ 0 │
|
||||
└────────────┴────────┴────────────────────┴───────┴────────────────┘
|
||||
```
|
docs/en/sql-reference/window-functions/last_value.md (new file, 79 lines)
@ -0,0 +1,79 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/last_value
|
||||
sidebar_label: last_value
|
||||
sidebar_position: 4
|
||||
---
|
||||
|
||||
# last_value
|
||||
|
||||
Returns the last value evaluated within its ordered frame. By default, NULL arguments are skipped; however, the `RESPECT NULLS` modifier can be used to override this behaviour.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
last_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
Alias: `anyLast`.
|
||||
|
||||
:::note
|
||||
Using the optional modifier `RESPECT NULLS` after `last_value(column_name)` will ensure that `NULL` arguments are not skipped.
|
||||
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
|
||||
:::
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The last value evaluated within its ordered frame.
|
||||
|
||||
**Example**
|
||||
|
||||
In this example the `last_value` function is used to find the lowest paid footballer from a fictional dataset of salaries of Premier League football players.
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS salaries;
|
||||
CREATE TABLE salaries
|
||||
(
|
||||
`team` String,
|
||||
`player` String,
|
||||
`salary` UInt32,
|
||||
`position` String
|
||||
)
|
||||
Engine = Memory;
|
||||
|
||||
INSERT INTO salaries FORMAT Values
|
||||
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
|
||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
|
||||
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
|
||||
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
|
||||
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
|
||||
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT player, salary,
|
||||
last_value(player) OVER (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS lowest_paid_player
|
||||
FROM salaries;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─player──────────┬─salary─┬─lowest_paid_player─┐
|
||||
1. │ Gary Chen │ 196000 │ Michael Stanley │
|
||||
2. │ Robert George │ 195000 │ Michael Stanley │
|
||||
3. │ Charles Juarez │ 190000 │ Michael Stanley │
|
||||
4. │ Scott Harrison │ 180000 │ Michael Stanley │
|
||||
5. │ Douglas Benson │ 150000 │ Michael Stanley │
|
||||
6. │ James Henderson │ 140000 │ Michael Stanley │
|
||||
7. │ Michael Stanley │ 100000 │ Michael Stanley │
|
||||
└─────────────────┴────────┴────────────────────┘
|
||||
```
|
docs/en/sql-reference/window-functions/leadInFrame.md (new file, 60 lines)
@ -0,0 +1,60 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/leadInFrame
|
||||
sidebar_label: leadInFrame
|
||||
sidebar_position: 9
|
||||
---
|
||||
|
||||
# leadInFrame
|
||||
|
||||
Returns a value evaluated at the row that is offset rows after the current row within the ordered frame.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
leadInFrame(x[, offset[, default]])
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Parameters**
|
||||
- `x` — Column name.
|
||||
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
|
||||
- `default` — Value to return if the calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The value evaluated at the row that is offset rows after the current row within the ordered frame.
|
||||
|
||||
**Example**
|
||||
|
||||
This example looks at [historical data](https://www.kaggle.com/datasets/sazidthe1/nobel-prize-data) for Nobel Prize winners and uses the `leadInFrame` function to return a list of successive winners in the physics category.
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE VIEW nobel_prize_laureates AS FROM file('nobel_laureates_data.csv') SELECT *;
|
||||
```
|
||||
|
||||
```sql
|
||||
FROM nobel_prize_laureates SELECT fullName, leadInFrame(year, 1, year) OVER (PARTITION BY category ORDER BY year) AS year, category, motivation WHERE category == 'physics' ORDER BY year DESC LIMIT 9;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─fullName─────────┬─year─┬─category─┬─motivation─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||
1. │ Pierre Agostini │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
|
||||
2. │ Ferenc Krausz │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
|
||||
3. │ Anne L Huillier │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
|
||||
4. │ Alain Aspect │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
|
||||
5. │ Anton Zeilinger │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
|
||||
6. │ John Clauser │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
|
||||
7. │ Syukuro Manabe │ 2021 │ physics │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming │
|
||||
8. │ Klaus Hasselmann │ 2021 │ physics │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming │
|
||||
9. │ Giorgio Parisi │ 2021 │ physics │ for the discovery of the interplay of disorder and fluctuations in physical systems from atomic to planetary scales │
|
||||
└──────────────────┴──────┴──────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
docs/en/sql-reference/window-functions/nth_value.md (new file, 75 lines)
@ -0,0 +1,75 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/nth_value
|
||||
sidebar_label: nth_value
|
||||
sidebar_position: 5
|
||||
---
|
||||
|
||||
# nth_value
|
||||
|
||||
Returns the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
nth_value (x, offset)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — Column name.
|
||||
- `offset` — nth row to evaluate current row against.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The first non-NULL value evaluated against the nth row (offset) in its ordered frame.
|
||||
|
||||
**Example**
|
||||
|
||||
In this example the `nth_value` function is used to find the third-highest salary from a fictional dataset of salaries of Premier League football players.
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS salaries;
|
||||
CREATE TABLE salaries
|
||||
(
|
||||
`team` String,
|
||||
`player` String,
|
||||
`salary` UInt32,
|
||||
`position` String
|
||||
)
|
||||
Engine = Memory;
|
||||
|
||||
INSERT INTO salaries FORMAT Values
|
||||
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
|
||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
|
||||
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
|
||||
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
|
||||
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
|
||||
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT player, salary, nth_value(player,3) OVER(ORDER BY salary DESC) AS third_highest_salary FROM salaries;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─player──────────┬─salary─┬─third_highest_salary─┐
|
||||
1. │ Gary Chen │ 195000 │ │
|
||||
2. │ Robert George │ 195000 │ │
|
||||
3. │ Charles Juarez │ 190000 │ Charles Juarez │
|
||||
4. │ Scott Harrison │ 180000 │ Charles Juarez │
|
||||
5. │ Douglas Benson │ 150000 │ Charles Juarez │
|
||||
6. │ James Henderson │ 140000 │ Charles Juarez │
|
||||
7. │ Michael Stanley │ 100000 │ Charles Juarez │
|
||||
└─────────────────┴────────┴──────────────────────┘
|
||||
```
|
docs/en/sql-reference/window-functions/rank.md (new file, 74 lines)
@ -0,0 +1,74 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/rank
|
||||
sidebar_label: rank
|
||||
sidebar_position: 6
|
||||
---
|
||||
|
||||
# rank
|
||||
|
||||
Ranks the current row within its partition with gaps. In other words, if the value of any row it encounters is equal to the value of a previous row then it will receive the same rank as that previous row.
|
||||
The rank of the next row is then equal to the rank of the previous row plus a gap equal to the number of times the previous rank was given.
|
||||
|
||||
The [dense_rank](./dense_rank.md) function provides the same behaviour but without gaps in ranking.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
rank()
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A number for the current row within its partition, including gaps. [UInt64](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
The following example is based on the example provided in the instructional video [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE salaries
|
||||
(
|
||||
`team` String,
|
||||
`player` String,
|
||||
`salary` UInt32,
|
||||
`position` String
|
||||
)
|
||||
Engine = Memory;
|
||||
|
||||
INSERT INTO salaries FORMAT Values
|
||||
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
|
||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
|
||||
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
|
||||
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
|
||||
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
|
||||
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT player, salary,
|
||||
rank() OVER (ORDER BY salary DESC) AS rank
|
||||
FROM salaries;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─player──────────┬─salary─┬─rank─┐
|
||||
1. │ Gary Chen │ 195000 │ 1 │
|
||||
2. │ Robert George │ 195000 │ 1 │
|
||||
3. │ Charles Juarez │ 190000 │ 3 │
|
||||
4. │ Douglas Benson │ 150000 │ 4 │
|
||||
5. │ Michael Stanley │ 150000 │ 4 │
|
||||
6. │ Scott Harrison │ 150000 │ 4 │
|
||||
7. │ James Henderson │ 140000 │ 7 │
|
||||
└─────────────────┴────────┴──────┘
|
||||
```
|
docs/en/sql-reference/window-functions/row_number.md (new file, 67 lines)
@ -0,0 +1,67 @@
|
||||
---
|
||||
slug: /en/sql-reference/window-functions/row_number
|
||||
sidebar_label: row_number
|
||||
sidebar_position: 2
|
||||
---
|
||||
|
||||
# row_number
|
||||
|
||||
Numbers the current row within its partition starting from 1.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
row_number()
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
|
||||
|
||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A number for the current row within its partition. [UInt64](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
The following example is based on the example provided in the instructional video [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE salaries
|
||||
(
|
||||
`team` String,
|
||||
`player` String,
|
||||
`salary` UInt32,
|
||||
`position` String
|
||||
)
|
||||
Engine = Memory;
|
||||
|
||||
INSERT INTO salaries FORMAT Values
|
||||
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
|
||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
|
||||
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
|
||||
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M');
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT player, salary,
|
||||
row_number() OVER (ORDER BY salary DESC) AS row_number
|
||||
FROM salaries;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─player──────────┬─salary─┬─row_number─┐
|
||||
1. │ Gary Chen │ 195000 │ 1 │
|
||||
2. │ Robert George │ 195000 │ 2 │
|
||||
3. │ Charles Juarez │ 190000 │ 3 │
|
||||
4. │ Scott Harrison │ 150000 │ 4 │
|
||||
5. │ Michael Stanley │ 150000 │ 5 │
|
||||
└─────────────────┴────────┴────────────┘
|
||||
```
|
@ -35,10 +35,9 @@ disable = '''
|
||||
broad-except,
|
||||
bare-except,
|
||||
no-else-return,
|
||||
global-statement
|
||||
global-statement,
|
||||
'''
|
||||
|
||||
[tool.pylint.SIMILARITIES]
|
||||
# due to SQL
|
||||
min-similarity-lines=1000
|
||||
|
||||
|
@ -38,10 +38,19 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_MREMAP;
|
||||
}
|
||||
|
||||
void abortOnFailedAssertion(const String & description, void * const * trace, size_t trace_offset, size_t trace_size)
|
||||
{
|
||||
auto & logger = Poco::Logger::root();
|
||||
LOG_FATAL(&logger, "Logical error: '{}'.", description);
|
||||
if (trace)
|
||||
LOG_FATAL(&logger, "Stack trace (when copying this message, always include the lines below):\n\n{}", StackTrace::toString(trace, trace_offset, trace_size));
|
||||
abort();
|
||||
}
|
||||
|
||||
void abortOnFailedAssertion(const String & description)
|
||||
{
|
||||
LOG_FATAL(&Poco::Logger::root(), "Logical error: '{}'.", description);
|
||||
abort();
|
||||
StackTrace st;
|
||||
abortOnFailedAssertion(description, st.getFramePointers().data(), st.getOffset(), st.getSize());
|
||||
}
|
||||
|
||||
bool terminate_on_any_exception = false;
|
||||
@ -58,7 +67,7 @@ void handle_error_code(const std::string & msg, int code, bool remote, const Exc
|
||||
#ifdef ABORT_ON_LOGICAL_ERROR
|
||||
if (code == ErrorCodes::LOGICAL_ERROR)
|
||||
{
|
||||
abortOnFailedAssertion(msg);
|
||||
abortOnFailedAssertion(msg, trace.data(), 0, trace.size());
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -25,8 +25,6 @@ namespace DB

class AtomicLogger;

[[noreturn]] void abortOnFailedAssertion(const String & description);

/// This flag can be set for testing purposes - to check that no exceptions are thrown.
extern bool terminate_on_any_exception;

@ -167,6 +165,8 @@ protected:
    mutable std::vector<StackTrace::FramePointers> capture_thread_frame_pointers;
};

[[noreturn]] void abortOnFailedAssertion(const String & description, void * const * trace, size_t trace_offset, size_t trace_size);
[[noreturn]] void abortOnFailedAssertion(const String & description);

std::string getExceptionStackTraceString(const std::exception & e);
std::string getExceptionStackTraceString(std::exception_ptr e);
@ -14,6 +14,7

namespace DB
{

namespace ErrorCodes
{
    extern const int CANNOT_ALLOCATE_MEMORY;
@ -235,7 +235,7 @@ bool NamedCollectionFactory::loadIfNot(std::lock_guard<std::mutex> & lock)
|
||||
loadFromConfig(context->getConfigRef(), lock);
|
||||
loadFromSQL(lock);
|
||||
|
||||
if (metadata_storage->supportsPeriodicUpdate())
|
||||
if (metadata_storage->isReplicated())
|
||||
{
|
||||
update_task = context->getSchedulePool().createTask("NamedCollectionsMetadataStorage", [this]{ updateFunc(); });
|
||||
update_task->activate();
|
||||
@ -357,6 +357,13 @@ void NamedCollectionFactory::reloadFromSQL()
|
||||
add(std::move(collections), lock);
|
||||
}
|
||||
|
||||
bool NamedCollectionFactory::usesReplicatedStorage()
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
loadIfNot(lock);
|
||||
return metadata_storage->isReplicated();
|
||||
}
|
||||
|
||||
void NamedCollectionFactory::updateFunc()
|
||||
{
|
||||
LOG_TRACE(log, "Named collections background updating thread started");
|
||||
|
@ -34,6 +34,8 @@ public:
|
||||
|
||||
void updateFromSQL(const ASTAlterNamedCollectionQuery & query);
|
||||
|
||||
bool usesReplicatedStorage();
|
||||
|
||||
void loadIfNot();
|
||||
|
||||
void shutdown();
|
||||
|
@ -67,7 +67,7 @@ public:
|
||||
|
||||
virtual bool removeIfExists(const std::string & path) = 0;
|
||||
|
||||
virtual bool supportsPeriodicUpdate() const = 0;
|
||||
virtual bool isReplicated() const = 0;
|
||||
|
||||
virtual bool waitUpdate(size_t /* timeout */) { return false; }
|
||||
};
|
||||
@ -89,7 +89,7 @@ public:
|
||||
|
||||
~LocalStorage() override = default;
|
||||
|
||||
bool supportsPeriodicUpdate() const override { return false; }
|
||||
bool isReplicated() const override { return false; }
|
||||
|
||||
std::vector<std::string> list() const override
|
||||
{
|
||||
@ -221,7 +221,7 @@ public:
|
||||
|
||||
~ZooKeeperStorage() override = default;
|
||||
|
||||
bool supportsPeriodicUpdate() const override { return true; }
|
||||
bool isReplicated() const override { return true; }
|
||||
|
||||
/// Return true if children changed.
|
||||
bool waitUpdate(size_t timeout) override
|
||||
@ -465,14 +465,14 @@ void NamedCollectionsMetadataStorage::writeCreateQuery(const ASTCreateNamedColle
|
||||
storage->write(getFileName(query.collection_name), serializeAST(*normalized_query), replace);
|
||||
}
|
||||
|
||||
bool NamedCollectionsMetadataStorage::supportsPeriodicUpdate() const
|
||||
bool NamedCollectionsMetadataStorage::isReplicated() const
|
||||
{
|
||||
return storage->supportsPeriodicUpdate();
|
||||
return storage->isReplicated();
|
||||
}
|
||||
|
||||
bool NamedCollectionsMetadataStorage::waitUpdate()
|
||||
{
|
||||
if (!storage->supportsPeriodicUpdate())
|
||||
if (!storage->isReplicated())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Periodic updates are not supported");
|
||||
|
||||
const auto & config = Context::getGlobalContextInstance()->getConfigRef();
|
||||
|
@ -30,7 +30,7 @@ public:
|
||||
/// Return true if update was made
|
||||
bool waitUpdate();
|
||||
|
||||
bool supportsPeriodicUpdate() const;
|
||||
bool isReplicated() const;
|
||||
|
||||
private:
|
||||
class INamedCollectionsStorage;
|
||||
|
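The hunks above rename `supportsPeriodicUpdate()` to `isReplicated()` across the named-collections metadata storage interface and its local and ZooKeeper-backed implementations, and the factory now schedules the background update task only when the storage is replicated. A simplified, self-contained sketch of that interface shape follows; the class and method names are abbreviated stand-ins, not the real ClickHouse types.

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Simplified stand-in for the named-collections metadata storage interface.
class IMetadataStorage
{
public:
    virtual ~IMetadataStorage() = default;

    /// Replaces the old supportsPeriodicUpdate(): replicated storages
    /// are exactly the ones that need the background update task.
    virtual bool isReplicated() const = 0;

    virtual std::vector<std::string> list() const = 0;
};

class LocalStorage : public IMetadataStorage
{
public:
    bool isReplicated() const override { return false; }
    std::vector<std::string> list() const override { return {"local_collection"}; }
};

class ZooKeeperStorage : public IMetadataStorage
{
public:
    bool isReplicated() const override { return true; }
    std::vector<std::string> list() const override { return {"replicated_collection"}; }
};

int main()
{
    std::unique_ptr<IMetadataStorage> storage = std::make_unique<ZooKeeperStorage>();

    // The factory schedules the periodic update task only for replicated storage,
    // mirroring the condition changed in the hunk above.
    if (storage->isReplicated())
        std::cout << "schedule background update task\n";
}
```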
@ -545,7 +545,7 @@ std::string StackTrace::toString() const
    return toStringCached(frame_pointers, offset, size);
}

std::string StackTrace::toString(void ** frame_pointers_raw, size_t offset, size_t size)
std::string StackTrace::toString(void * const * frame_pointers_raw, size_t offset, size_t size)
{
    __msan_unpoison(frame_pointers_raw, size * sizeof(*frame_pointers_raw));

@ -59,7 +59,7 @@ public:
    const FramePointers & getFramePointers() const { return frame_pointers; }
    std::string toString() const;

    static std::string toString(void ** frame_pointers, size_t offset, size_t size);
    static std::string toString(void * const * frame_pointers, size_t offset, size_t size);
    static void dropCache();

    /// @param fatal - if true, will process inline frames (slower)
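`StackTrace::toString` now takes `void * const *` instead of `void **`, which is what allows passing frame pointers obtained from a `const` stack trace, as the new `abortOnFailedAssertion` overload does. A small standalone illustration of why the const-qualified pointee matters:

```cpp
#include <array>
#include <cstddef>
#include <cstdio>

// Old-style signature: requires mutable pointers.
void printFramesMutable(void ** frames, size_t size)
{
    std::printf("mutable: %zu frames\n", size);
    (void)frames;
}

// New-style signature: promises not to modify the array of pointers.
void printFramesConst(void * const * frames, size_t size)
{
    std::printf("const:   %zu frames\n", size);
    (void)frames;
}

int main()
{
    const std::array<void *, 3> frames{};   // e.g. frame pointers held by a const stack trace

    // printFramesMutable(frames.data(), frames.size());  // does not compile: data() yields void * const *
    printFramesConst(frames.data(), frames.size());        // fine with the relaxed signature
}
```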
@ -25,7 +25,7 @@ namespace DB
|
||||
template <typename To, typename From>
|
||||
inline To assert_cast(From && from)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
#ifdef ABORT_ON_LOGICAL_ERROR
|
||||
try
|
||||
{
|
||||
if constexpr (std::is_pointer_v<To>)
|
||||
|
@ -228,7 +228,6 @@ Pool::Entry Pool::tryGet()
|
||||
for (auto connection_it = connections.cbegin(); connection_it != connections.cend();)
|
||||
{
|
||||
Connection * connection_ptr = *connection_it;
|
||||
/// Fixme: There is a race condition here b/c we do not synchronize with Pool::Entry's copy-assignment operator
|
||||
if (connection_ptr->ref_count == 0)
|
||||
{
|
||||
{
|
||||
|
@ -64,17 +64,6 @@ public:
|
||||
decrementRefCount();
|
||||
}
|
||||
|
||||
Entry & operator= (const Entry & src) /// NOLINT
|
||||
{
|
||||
pool = src.pool;
|
||||
if (data)
|
||||
decrementRefCount();
|
||||
data = src.data;
|
||||
if (data)
|
||||
incrementRefCount();
|
||||
return * this;
|
||||
}
|
||||
|
||||
bool isNull() const
|
||||
{
|
||||
return data == nullptr;
|
||||
|
@ -13,13 +13,11 @@ mysqlxx::Pool::Entry getWithFailover(mysqlxx::Pool & connections_pool)
|
||||
|
||||
constexpr size_t max_tries = 3;
|
||||
|
||||
mysqlxx::Pool::Entry worker_connection;
|
||||
|
||||
for (size_t try_no = 1; try_no <= max_tries; ++try_no)
|
||||
{
|
||||
try
|
||||
{
|
||||
worker_connection = connections_pool.tryGet();
|
||||
mysqlxx::Pool::Entry worker_connection = connections_pool.tryGet();
|
||||
|
||||
if (!worker_connection.isNull())
|
||||
{
|
||||
|
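The pool changes above drop the racy `Entry` copy assignment (note the FIXME about unsynchronized reference counting) and declare the connection entry inside each retry iteration instead of assigning to a variable that outlives the loop. A loose, standalone sketch of that retry pattern follows; the `Pool`/`Connection` types and the success criteria are hypothetical, not the mysqlxx API.

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for a pooled connection handle.
struct Connection
{
    std::string name;
};

// Hypothetical pool: may fail to hand out a connection.
struct Pool
{
    int attempts_before_success = 2;

    std::shared_ptr<Connection> tryGet()
    {
        if (attempts_before_success-- > 0)
            return nullptr;   // pool exhausted / server unavailable
        return std::make_shared<Connection>(Connection{"worker"});
    }
};

std::shared_ptr<Connection> getWithFailover(Pool & pool)
{
    constexpr size_t max_tries = 3;

    for (size_t try_no = 1; try_no <= max_tries; ++try_no)
    {
        // The handle lives only inside one iteration, mirroring the change that
        // moved the Entry declaration into the loop body: a failed attempt cannot
        // keep a stale handle alive across retries.
        std::shared_ptr<Connection> connection = pool.tryGet();
        if (connection)
            return connection;

        std::cout << "attempt " << try_no << " failed, retrying\n";
    }
    throw std::runtime_error("no connection after retries");
}

int main()
{
    Pool pool;
    auto conn = getWithFailover(pool);
    std::cout << "got connection: " << conn->name << "\n";
}
```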
@ -346,6 +346,7 @@ class IColumn;
|
||||
\
|
||||
M(Bool, ignore_on_cluster_for_replicated_udf_queries, false, "Ignore ON CLUSTER clause for replicated UDF management queries.", 0) \
|
||||
M(Bool, ignore_on_cluster_for_replicated_access_entities_queries, false, "Ignore ON CLUSTER clause for replicated access entities management queries.", 0) \
|
||||
M(Bool, ignore_on_cluster_for_replicated_named_collections_queries, false, "Ignore ON CLUSTER clause for replicated named collections management queries.", 0) \
|
||||
/** Settings for testing hedged requests */ \
|
||||
M(Milliseconds, sleep_in_send_tables_status_ms, 0, "Time to sleep in sending tables status response in TCPHandler", 0) \
|
||||
M(Milliseconds, sleep_in_send_data_ms, 0, "Time to sleep in sending data in TCPHandler", 0) \
|
||||
|
@ -77,6 +77,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
|
||||
{"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"},
|
||||
{"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"},
|
||||
{"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"},
|
||||
{"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."},
|
||||
{"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},
|
||||
{"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}
|
||||
}},
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTSubquery.h>
|
||||
#include <Parsers/ASTSelectWithUnionQuery.h>
|
||||
#include <Parsers/ASTTTLElement.h>
|
||||
#include <Poco/String.h>
|
||||
@ -211,6 +212,13 @@ void DDLLoadingDependencyVisitor::extractTableNameFromArgument(const ASTFunction
|
||||
qualified_name.database = table_identifier->getDatabaseName();
|
||||
qualified_name.table = table_identifier->shortName();
|
||||
}
|
||||
else if (arg->as<ASTSubquery>())
|
||||
{
|
||||
/// Allow IN subquery.
|
||||
/// Do not add tables from the subquery into dependencies,
|
||||
/// because CREATE will succeed anyway.
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(false);
|
||||
|
@ -107,12 +107,24 @@ void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name,
|
||||
|
||||
StoragePtr DatabaseAtomic::detachTable(ContextPtr /* context */, const String & name)
|
||||
{
|
||||
// it is important to call the destructors of not_in_use without
|
||||
// locked mutex to avoid potential deadlock.
|
||||
DetachedTables not_in_use;
|
||||
std::lock_guard lock(mutex);
|
||||
auto table = DatabaseOrdinary::detachTableUnlocked(name);
|
||||
table_name_to_path.erase(name);
|
||||
detached_tables.emplace(table->getStorageID().uuid, table);
|
||||
not_in_use = cleanupDetachedTables();
|
||||
StoragePtr table;
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
table = DatabaseOrdinary::detachTableUnlocked(name);
|
||||
table_name_to_path.erase(name);
|
||||
detached_tables.emplace(table->getStorageID().uuid, table);
|
||||
not_in_use = cleanupDetachedTables();
|
||||
}
|
||||
|
||||
if (!not_in_use.empty())
|
||||
{
|
||||
not_in_use.clear();
|
||||
LOG_DEBUG(log, "Finished removing not used detached tables");
|
||||
}
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
|
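The `detachTable` hunk narrows the `lock_guard` to an inner block so that destructors of the collected `not_in_use` tables run only after the mutex has been released, as the comment about potential deadlock demands. A self-contained sketch of the same collect-under-lock, destroy-outside-lock pattern, using hypothetical types rather than the real database classes:

```cpp
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

struct Table
{
    std::string name;
    ~Table() { std::cout << "destroying " << name << " without the mutex held\n"; }
};

std::mutex mutex;
std::unordered_map<std::string, std::shared_ptr<Table>> detached_tables;

void cleanupDetachedTables()
{
    // Destructors of the collected tables must not run under `mutex`,
    // otherwise a destructor that takes the same mutex would deadlock.
    std::vector<std::shared_ptr<Table>> not_in_use;

    {
        std::lock_guard lock(mutex);
        for (auto it = detached_tables.begin(); it != detached_tables.end();)
        {
            if (it->second.use_count() == 1)   // nobody else references the table
            {
                not_in_use.push_back(std::move(it->second));
                it = detached_tables.erase(it);
            }
            else
                ++it;
        }
    }   // lock released here

    not_in_use.clear();   // destructors run outside the critical section
}

int main()
{
    {
        std::lock_guard lock(mutex);
        detached_tables["a"] = std::make_shared<Table>(Table{"a"});
        detached_tables["b"] = std::make_shared<Table>(Table{"b"});
    }
    cleanupDetachedTables();
}
```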
@ -29,6 +29,7 @@
|
||||
#include <Common/randomNumber.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <base/sleep.h>
|
||||
#include <base/scope_guard.h>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
#include <Parsers/CommonParsers.h>
|
||||
@ -532,13 +533,17 @@ static inline void dumpDataForTables(
|
||||
bool MaterializedMySQLSyncThread::prepareSynchronized(MaterializeMetadata & metadata)
|
||||
{
|
||||
bool opened_transaction = false;
|
||||
mysqlxx::PoolWithFailover::Entry connection;
|
||||
|
||||
while (!isCancelled())
|
||||
{
|
||||
try
|
||||
{
|
||||
connection = pool.tryGet();
|
||||
mysqlxx::PoolWithFailover::Entry connection = pool.tryGet();
|
||||
SCOPE_EXIT({
|
||||
if (opened_transaction)
|
||||
connection->query("ROLLBACK").execute();
|
||||
});
|
||||
|
||||
if (connection.isNull())
|
||||
{
|
||||
if (settings->max_wait_time_when_mysql_unavailable < 0)
|
||||
@ -602,9 +607,6 @@ bool MaterializedMySQLSyncThread::prepareSynchronized(MaterializeMetadata & meta
|
||||
{
|
||||
tryLogCurrentException(log);
|
||||
|
||||
if (opened_transaction)
|
||||
connection->query("ROLLBACK").execute();
|
||||
|
||||
if (settings->max_wait_time_when_mysql_unavailable < 0)
|
||||
throw;
|
||||
|
||||
|
src/Formats/JSONExtractTree.cpp (new file, 1660 lines) — diff suppressed because it is too large
src/Formats/JSONExtractTree.h (new file, 41 lines)
@ -0,0 +1,41 @@
#pragma once
#include <DataTypes/IDataType.h>
#include <Columns/IColumn.h>
#include <Formats/FormatSettings.h>


namespace DB
{

struct JSONExtractInsertSettings
{
    /// If false, JSON boolean values won't be inserted into columns with integer types
    /// It's used in JSONExtractInt64/JSONExtractUInt64/... functions.
    bool convert_bool_to_integer = true;
    /// If true, when complex type like Array/Map has both valid and invalid elements,
    /// the default value will be inserted on invalid elements.
    /// For example, if we have [1, "hello", 2] and type Array(UInt32),
    /// we will insert [1, 0, 2] in the column. Used in all JSONExtract functions.
    bool insert_default_on_invalid_elements_in_complex_types = false;
};

template <typename JSONParser>
class JSONExtractTreeNode
{
public:
    JSONExtractTreeNode() = default;
    virtual ~JSONExtractTreeNode() = default;
    virtual bool insertResultToColumn(IColumn &, const typename JSONParser::Element &, const JSONExtractInsertSettings & insert_setting, const FormatSettings & format_settings, String & error) const = 0;
};

/// Build a tree for insertion JSON element into a column with provided data type.
template <typename JSONParser>
std::unique_ptr<JSONExtractTreeNode<JSONParser>> buildJSONExtractTree(const DataTypePtr & type, const char * source_for_exception_message);

template <typename JSONParser>
void jsonElementToString(const typename JSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings);

template <typename JSONParser, typename NumberType>
bool tryGetNumericValueFromJSONElement(NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, String & error);

}
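To make the shape of the new header concrete, here is a heavily simplified, standalone analogue of the extract-tree idea: one node per target type, each knowing how to insert one parsed element into a column and to report an error. It uses a `std::variant` as a stand-in for the JSON element and a `std::vector<std::string>` as a stand-in for `IColumn`; none of these names are the real ClickHouse API.

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <variant>
#include <vector>

// Stand-in for a parsed JSON element (the real code is templated over a JSON parser).
using JSONElement = std::variant<int64_t, std::string>;

// Minimal analogue of JSONExtractTreeNode: a node moves one element into one
// destination column, reporting an error string on failure.
struct ExtractNode
{
    virtual ~ExtractNode() = default;
    virtual bool insertResultToColumn(std::vector<std::string> & column,
                                      const JSONElement & element, std::string & error) const = 0;
};

struct Int64Node : ExtractNode
{
    bool insertResultToColumn(std::vector<std::string> & column,
                              const JSONElement & element, std::string & error) const override
    {
        if (const auto * value = std::get_if<int64_t>(&element))
        {
            column.push_back(std::to_string(*value));
            return true;
        }
        error = "cannot read Int64 from element";
        return false;
    }
};

struct StringNode : ExtractNode
{
    bool insertResultToColumn(std::vector<std::string> & column,
                              const JSONElement & element, std::string & error) const override
    {
        if (const auto * value = std::get_if<std::string>(&element))
        {
            column.push_back(*value);
            return true;
        }
        error = "cannot read String from element";
        return false;
    }
};

// Analogue of buildJSONExtractTree: pick a node for the requested type.
std::unique_ptr<ExtractNode> buildExtractTree(const std::string & type)
{
    if (type == "Int64")
        return std::make_unique<Int64Node>();
    return std::make_unique<StringNode>();
}

int main()
{
    std::vector<std::string> column;
    std::string error;

    auto node = buildExtractTree("Int64");
    node->insertResultToColumn(column, JSONElement{int64_t{42}}, error);
    if (!node->insertResultToColumn(column, JSONElement{std::string{"hello"}}, error))
        std::cout << "error: " << error << "\n";

    std::cout << "column size: " << column.size() << "\n";
}
```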
@ -225,19 +225,6 @@ namespace
|
||||
Paths paths;
|
||||
};
|
||||
|
||||
bool checkIfTypesAreEqual(const DataTypes & types)
|
||||
{
|
||||
if (types.empty())
|
||||
return true;
|
||||
|
||||
for (size_t i = 1; i < types.size(); ++i)
|
||||
{
|
||||
if (!types[0]->equals(*types[i]))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void updateTypeIndexes(DataTypes & data_types, TypeIndexesSet & type_indexes)
|
||||
{
|
||||
type_indexes.clear();
|
||||
@ -272,24 +259,31 @@ namespace
|
||||
type_indexes.erase(TypeIndex::Nothing);
|
||||
}
|
||||
|
||||
/// If we have both Int64 and UInt64, convert all Int64 to UInt64,
|
||||
/// If we have both Int64 and UInt64, convert all not-negative Int64 to UInt64,
|
||||
/// because UInt64 is inferred only in case of Int64 overflow.
|
||||
void transformIntegers(DataTypes & data_types, TypeIndexesSet & type_indexes)
|
||||
void transformIntegers(DataTypes & data_types, TypeIndexesSet & type_indexes, JSONInferenceInfo * json_info)
|
||||
{
|
||||
if (!type_indexes.contains(TypeIndex::Int64) || !type_indexes.contains(TypeIndex::UInt64))
|
||||
return;
|
||||
|
||||
bool have_negative_integers = false;
|
||||
for (auto & type : data_types)
|
||||
{
|
||||
if (WhichDataType(type).isInt64())
|
||||
type = std::make_shared<DataTypeUInt64>();
|
||||
{
|
||||
bool is_negative = json_info && json_info->negative_integers.contains(type.get());
|
||||
have_negative_integers |= is_negative;
|
||||
if (!is_negative)
|
||||
type = std::make_shared<DataTypeUInt64>();
|
||||
}
|
||||
}
|
||||
|
||||
type_indexes.erase(TypeIndex::Int64);
|
||||
if (!have_negative_integers)
|
||||
type_indexes.erase(TypeIndex::Int64);
|
||||
}
|
||||
|
||||
/// If we have both Int64 and Float64 types, convert all Int64 to Float64.
|
||||
void transformIntegersAndFloatsToFloats(DataTypes & data_types, TypeIndexesSet & type_indexes)
|
||||
void transformIntegersAndFloatsToFloats(DataTypes & data_types, TypeIndexesSet & type_indexes, JSONInferenceInfo * json_info)
|
||||
{
|
||||
bool have_floats = type_indexes.contains(TypeIndex::Float64);
|
||||
bool have_integers = type_indexes.contains(TypeIndex::Int64) || type_indexes.contains(TypeIndex::UInt64);
|
||||
@ -300,7 +294,12 @@ namespace
|
||||
{
|
||||
WhichDataType which(type);
|
||||
if (which.isInt64() || which.isUInt64())
|
||||
type = std::make_shared<DataTypeFloat64>();
|
||||
{
|
||||
auto new_type = std::make_shared<DataTypeFloat64>();
|
||||
if (json_info && json_info->numbers_parsed_from_json_strings.erase(type.get()))
|
||||
json_info->numbers_parsed_from_json_strings.insert(new_type.get());
|
||||
type = new_type;
|
||||
}
|
||||
}
|
||||
|
||||
type_indexes.erase(TypeIndex::Int64);
|
||||
@ -635,9 +634,9 @@ namespace
|
||||
if (settings.try_infer_integers)
|
||||
{
|
||||
/// Transform Int64 to UInt64 if needed.
|
||||
transformIntegers(data_types, type_indexes);
|
||||
transformIntegers(data_types, type_indexes, json_info);
|
||||
/// Transform integers to floats if needed.
|
||||
transformIntegersAndFloatsToFloats(data_types, type_indexes);
|
||||
transformIntegersAndFloatsToFloats(data_types, type_indexes, json_info);
|
||||
}
|
||||
|
||||
/// Transform Date to DateTime or both to String if needed.
|
||||
@ -887,7 +886,7 @@ namespace
|
||||
}
|
||||
|
||||
template <bool is_json>
|
||||
DataTypePtr tryInferNumber(ReadBuffer & buf, const FormatSettings & settings)
|
||||
DataTypePtr tryInferNumber(ReadBuffer & buf, const FormatSettings & settings, JSONInferenceInfo * json_info)
|
||||
{
|
||||
if (buf.eof())
|
||||
return nullptr;
|
||||
@ -911,7 +910,12 @@ namespace
|
||||
Int64 tmp_int;
|
||||
buf.position() = number_start;
|
||||
if (tryReadIntText(tmp_int, buf))
|
||||
return std::make_shared<DataTypeInt64>();
|
||||
{
|
||||
auto type = std::make_shared<DataTypeInt64>();
|
||||
if (json_info && tmp_int < 0)
|
||||
json_info->negative_integers.insert(type.get());
|
||||
return type;
|
||||
}
|
||||
|
||||
/// In case of Int64 overflow we can try to infer UInt64.
|
||||
UInt64 tmp_uint;
|
||||
@ -934,7 +938,12 @@ namespace
|
||||
|
||||
Int64 tmp_int;
|
||||
if (tryReadIntText(tmp_int, peekable_buf))
|
||||
return std::make_shared<DataTypeInt64>();
|
||||
{
|
||||
auto type = std::make_shared<DataTypeInt64>();
|
||||
if (json_info && tmp_int < 0)
|
||||
json_info->negative_integers.insert(type.get());
|
||||
return type;
|
||||
}
|
||||
peekable_buf.rollbackToCheckpoint(/* drop= */ true);
|
||||
|
||||
/// In case of Int64 overflow we can try to infer UInt64.
|
||||
@ -952,7 +961,7 @@ namespace
|
||||
}
|
||||
|
||||
template <bool is_json>
|
||||
DataTypePtr tryInferNumberFromStringImpl(std::string_view field, const FormatSettings & settings)
|
||||
DataTypePtr tryInferNumberFromStringImpl(std::string_view field, const FormatSettings & settings, JSONInferenceInfo * json_inference_info = nullptr)
|
||||
{
|
||||
ReadBufferFromString buf(field);
|
||||
|
||||
@ -960,7 +969,12 @@ namespace
|
||||
{
|
||||
Int64 tmp_int;
|
||||
if (tryReadIntText(tmp_int, buf) && buf.eof())
|
||||
return std::make_shared<DataTypeInt64>();
|
||||
{
|
||||
auto type = std::make_shared<DataTypeInt64>();
|
||||
if (json_inference_info && tmp_int < 0)
|
||||
json_inference_info->negative_integers.insert(type.get());
|
||||
return type;
|
||||
}
|
||||
|
||||
/// We can safely get back to the start of buffer, because we read from a string and we didn't reach eof.
|
||||
buf.position() = buf.buffer().begin();
|
||||
@ -1011,7 +1025,7 @@ namespace
|
||||
{
|
||||
if (settings.json.try_infer_numbers_from_strings)
|
||||
{
|
||||
if (auto number_type = tryInferNumberFromStringImpl<true>(field, settings))
|
||||
if (auto number_type = tryInferNumberFromStringImpl<true>(field, settings, json_info))
|
||||
{
|
||||
json_info->numbers_parsed_from_json_strings.insert(number_type.get());
|
||||
return number_type;
|
||||
@ -1254,10 +1268,23 @@ namespace
|
||||
}
|
||||
|
||||
/// Number
|
||||
return tryInferNumber<is_json>(buf, settings);
|
||||
return tryInferNumber<is_json>(buf, settings, json_info);
|
||||
}
|
||||
}
|
||||
|
||||
bool checkIfTypesAreEqual(const DataTypes & types)
|
||||
{
|
||||
if (types.empty())
|
||||
return true;
|
||||
|
||||
for (size_t i = 1; i < types.size(); ++i)
|
||||
{
|
||||
if (!types[0]->equals(*types[i]))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void transformInferredTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings)
|
||||
{
|
||||
DataTypes types = {first, second};
|
||||
@ -1275,6 +1302,11 @@ void transformInferredJSONTypesIfNeeded(
|
||||
second = std::move(types[1]);
|
||||
}
|
||||
|
||||
void transformInferredJSONTypesIfNeeded(DataTypes & types, const FormatSettings & settings, JSONInferenceInfo * json_info)
|
||||
{
|
||||
transformInferredTypesIfNeededImpl<true>(types, settings, json_info);
|
||||
}
|
||||
|
||||
void transformInferredJSONTypesFromDifferentFilesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings)
|
||||
{
|
||||
JSONInferenceInfo json_info;
|
||||
@ -1396,6 +1428,12 @@ DataTypePtr tryInferNumberFromString(std::string_view field, const FormatSetting
|
||||
return tryInferNumberFromStringImpl<false>(field, settings);
|
||||
}
|
||||
|
||||
DataTypePtr tryInferJSONNumberFromString(std::string_view field, const FormatSettings & settings, JSONInferenceInfo * json_info)
|
||||
{
|
||||
return tryInferNumberFromStringImpl<false>(field, settings, json_info);
|
||||
|
||||
}
|
||||
|
||||
DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const FormatSettings & settings)
|
||||
{
|
||||
if (settings.try_infer_dates && tryInferDate(field))
|
||||
|
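The schema-inference hunks above track which inferred `Int64` types came from negative values: when both `Int64` and `UInt64` appear for a column, only the non-negative `Int64` entries are promoted to `UInt64`, and `Int64` stays in the type index if any negative value was seen. A standalone sketch of that merge rule follows, with simplified string-based types instead of the real `DataTypePtr` machinery.

```cpp
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct InferredType
{
    std::string name;        // "Int64" or "UInt64"
    bool negative = false;   // analogue of JSONInferenceInfo::negative_integers
};

// Analogue of transformIntegers(): when both Int64 and UInt64 were inferred for
// the same column, promote only the non-negative Int64 entries to UInt64.
void transformIntegers(std::vector<InferredType> & types, std::set<std::string> & type_index)
{
    if (!type_index.count("Int64") || !type_index.count("UInt64"))
        return;

    bool have_negative_integers = false;
    for (auto & type : types)
    {
        if (type.name == "Int64")
        {
            have_negative_integers |= type.negative;
            if (!type.negative)
                type.name = "UInt64";
        }
    }

    // Int64 disappears from the index only if no negative value was ever seen;
    // otherwise the later integers+floats pass still has to consider it.
    if (!have_negative_integers)
        type_index.erase("Int64");
}

int main()
{
    std::vector<InferredType> types = {{"Int64", false}, {"UInt64", false}, {"Int64", true}};
    std::set<std::string> index = {"Int64", "UInt64"};

    transformIntegers(types, index);

    for (const auto & t : types)
        std::cout << t.name << "\n";                                        // UInt64, UInt64, Int64
    std::cout << "Int64 still indexed: " << index.count("Int64") << "\n";   // 1
}
```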
@ -2,6 +2,7 @@
|
||||
|
||||
#include <DataTypes/IDataType.h>
|
||||
#include <IO/ReadBuffer.h>
|
||||
#include <Formats/FormatSettings.h>
|
||||
|
||||
#include <vector>
|
||||
|
||||
@ -18,6 +19,11 @@ struct JSONInferenceInfo
|
||||
/// We store numbers that were parsed from strings.
|
||||
/// It's used in types transformation to change such numbers back to string if needed.
|
||||
std::unordered_set<const IDataType *> numbers_parsed_from_json_strings;
|
||||
/// Store integer types that were inferred from negative numbers.
|
||||
/// It's used to determine common type for Int64 and UInt64
|
||||
/// TODO: check it not only in JSON formats.
|
||||
std::unordered_set<const IDataType *> negative_integers;
|
||||
|
||||
/// Indicates if currently we are inferring type for Map/Object key.
|
||||
bool is_object_key = false;
|
||||
/// When we transform types for the same column from different files
|
||||
@ -48,6 +54,7 @@ DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const Forma
|
||||
/// Try to parse a number value from a string. By default, it tries to parse Float64,
|
||||
/// but if setting try_infer_integers is enabled, it also tries to parse Int64.
|
||||
DataTypePtr tryInferNumberFromString(std::string_view field, const FormatSettings & settings);
|
||||
DataTypePtr tryInferJSONNumberFromString(std::string_view field, const FormatSettings & settings, JSONInferenceInfo * json_info);
|
||||
|
||||
/// It takes two types inferred for the same column and tries to transform them to a common type if possible.
|
||||
/// It's also used when we try to infer some not ordinary types from another types.
|
||||
@ -77,6 +84,7 @@ void transformInferredTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, c
|
||||
/// Example 2:
|
||||
/// We merge DataTypeJSONPaths types to a single DataTypeJSONPaths type with union of all JSON paths.
|
||||
void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings, JSONInferenceInfo * json_info);
|
||||
void transformInferredJSONTypesIfNeeded(DataTypes & types, const FormatSettings & settings, JSONInferenceInfo * json_info);
|
||||
|
||||
/// Make final transform for types inferred in JSON format. It does 3 types of transformation:
|
||||
/// 1) Checks if type is unnamed Tuple(...), tries to transform nested types to find a common type for them and if all nested types
|
||||
@ -107,4 +115,6 @@ NamesAndTypesList getNamesAndRecursivelyNullableTypes(const Block & header);
|
||||
/// Check if type contains Nothing, like Array(Tuple(Nullable(Nothing), String))
|
||||
bool checkIfTypeIsComplete(const DataTypePtr & type);
|
||||
|
||||
bool checkIfTypesAreEqual(const DataTypes & types);
|
||||
|
||||
}
|
||||
|
Two further file diffs suppressed because they are too large.
@ -4,6 +4,7 @@
|
||||
#include <Access/ContextAccess.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/executeDDLQueryOnCluster.h>
|
||||
#include <Interpreters/removeOnClusterClauseIfNeeded.h>
|
||||
#include <Common/NamedCollections/NamedCollectionsFactory.h>
|
||||
|
||||
|
||||
@ -13,14 +14,16 @@ namespace DB
|
||||
BlockIO InterpreterAlterNamedCollectionQuery::execute()
|
||||
{
|
||||
auto current_context = getContext();
|
||||
const auto & query = query_ptr->as<const ASTAlterNamedCollectionQuery &>();
|
||||
|
||||
const auto updated_query = removeOnClusterClauseIfNeeded(query_ptr, getContext());
|
||||
const auto & query = updated_query->as<const ASTAlterNamedCollectionQuery &>();
|
||||
|
||||
current_context->checkAccess(AccessType::ALTER_NAMED_COLLECTION, query.collection_name);
|
||||
|
||||
if (!query.cluster.empty())
|
||||
{
|
||||
DDLQueryOnClusterParams params;
|
||||
return executeDDLQueryOnCluster(query_ptr, current_context, params);
|
||||
return executeDDLQueryOnCluster(updated_query, current_context, params);
|
||||
}
|
||||
|
||||
NamedCollectionFactory::instance().updateFromSQL(query);
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <Access/ContextAccess.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/executeDDLQueryOnCluster.h>
|
||||
#include <Interpreters/removeOnClusterClauseIfNeeded.h>
|
||||
#include <Common/NamedCollections/NamedCollectionsFactory.h>
|
||||
|
||||
|
||||
@ -13,14 +14,16 @@ namespace DB
|
||||
BlockIO InterpreterCreateNamedCollectionQuery::execute()
|
||||
{
|
||||
auto current_context = getContext();
|
||||
const auto & query = query_ptr->as<const ASTCreateNamedCollectionQuery &>();
|
||||
|
||||
const auto updated_query = removeOnClusterClauseIfNeeded(query_ptr, getContext());
|
||||
const auto & query = updated_query->as<const ASTCreateNamedCollectionQuery &>();
|
||||
|
||||
current_context->checkAccess(AccessType::CREATE_NAMED_COLLECTION, query.collection_name);
|
||||
|
||||
if (!query.cluster.empty())
|
||||
{
|
||||
DDLQueryOnClusterParams params;
|
||||
return executeDDLQueryOnCluster(query_ptr, current_context, params);
|
||||
return executeDDLQueryOnCluster(updated_query, current_context, params);
|
||||
}
|
||||
|
||||
NamedCollectionFactory::instance().createFromSQL(query);
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <Access/ContextAccess.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/executeDDLQueryOnCluster.h>
|
||||
#include <Interpreters/removeOnClusterClauseIfNeeded.h>
|
||||
#include <Common/NamedCollections/NamedCollectionsFactory.h>
|
||||
|
||||
|
||||
@ -13,14 +14,16 @@ namespace DB
|
||||
BlockIO InterpreterDropNamedCollectionQuery::execute()
|
||||
{
|
||||
auto current_context = getContext();
|
||||
const auto & query = query_ptr->as<const ASTDropNamedCollectionQuery &>();
|
||||
|
||||
const auto updated_query = removeOnClusterClauseIfNeeded(query_ptr, getContext());
|
||||
const auto & query = updated_query->as<const ASTDropNamedCollectionQuery &>();
|
||||
|
||||
current_context->checkAccess(AccessType::DROP_NAMED_COLLECTION, query.collection_name);
|
||||
|
||||
if (!query.cluster.empty())
|
||||
{
|
||||
DDLQueryOnClusterParams params;
|
||||
return executeDDLQueryOnCluster(query_ptr, current_context, params);
|
||||
return executeDDLQueryOnCluster(updated_query, current_context, params);
|
||||
}
|
||||
|
||||
NamedCollectionFactory::instance().removeFromSQL(query);
|
||||
|
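All three named-collection interpreters now rewrite the query first, dropping ON CLUSTER when the relevant setting is enabled and the collection storage is replicated, and then use the rewritten AST for both the access check and cluster execution. A simplified, hypothetical sketch of that flow; the `Query` type and the hard-coded setting checks are stand-ins, not the real interpreter API.

```cpp
#include <iostream>
#include <memory>
#include <string>

// Hypothetical, heavily simplified query object.
struct Query
{
    std::string collection_name;
    std::string cluster;   // empty when there is no ON CLUSTER clause
};

bool storageIsReplicated() { return true; }          // stand-in for the factory's usesReplicatedStorage()
bool ignoreOnClusterSettingEnabled() { return true; }

// Analogue of removeOnClusterClauseIfNeeded: return a (possibly) rewritten copy.
std::shared_ptr<Query> removeOnClusterClauseIfNeeded(const std::shared_ptr<Query> & query)
{
    if (!query->cluster.empty() && ignoreOnClusterSettingEnabled() && storageIsReplicated())
    {
        auto rewritten = std::make_shared<Query>(*query);
        rewritten->cluster.clear();
        std::cout << "ON CLUSTER clause was ignored\n";
        return rewritten;
    }
    return query;
}

void execute(const std::shared_ptr<Query> & query_ptr)
{
    // The interpreter now works with the rewritten query everywhere:
    // access check, cluster branch and local branch all see the same AST.
    const auto updated_query = removeOnClusterClauseIfNeeded(query_ptr);

    std::cout << "check access for " << updated_query->collection_name << "\n";

    if (!updated_query->cluster.empty())
        std::cout << "execute on cluster " << updated_query->cluster << "\n";
    else
        std::cout << "execute locally\n";
}

int main()
{
    auto query = std::make_shared<Query>(Query{"collection1", "replicated_cluster"});
    execute(query);
}
```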
@ -73,66 +73,55 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v
|
||||
return false;
|
||||
}
|
||||
|
||||
void OptimizeIfWithConstantConditionVisitor::visit(ASTPtr & current_ast)
|
||||
void OptimizeIfWithConstantConditionVisitorData::visit(ASTFunction & function_node, ASTPtr & ast)
|
||||
{
|
||||
if (!current_ast)
|
||||
return;
|
||||
|
||||
checkStackSize();
|
||||
|
||||
for (ASTPtr & child : current_ast->children)
|
||||
if (function_node.name != "if")
|
||||
return;
|
||||
|
||||
if (!function_node.arguments)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Wrong number of arguments for function 'if' (0 instead of 3)");
|
||||
|
||||
if (function_node.arguments->children.size() != 3)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Wrong number of arguments for function 'if' ({} instead of 3)",
|
||||
function_node.arguments->children.size());
|
||||
|
||||
const auto * args = function_node.arguments->as<ASTExpressionList>();
|
||||
|
||||
ASTPtr condition_expr = args->children[0];
|
||||
ASTPtr then_expr = args->children[1];
|
||||
ASTPtr else_expr = args->children[2];
|
||||
|
||||
bool condition;
|
||||
if (tryExtractConstValueFromCondition(condition_expr, condition))
|
||||
{
|
||||
auto * function_node = child->as<ASTFunction>();
|
||||
if (!function_node || function_node->name != "if")
|
||||
ASTPtr replace_ast = condition ? then_expr : else_expr;
|
||||
ASTPtr child_copy = ast;
|
||||
String replace_alias = replace_ast->tryGetAlias();
|
||||
String if_alias = ast->tryGetAlias();
|
||||
|
||||
if (replace_alias.empty())
|
||||
{
|
||||
visit(child);
|
||||
continue;
|
||||
replace_ast->setAlias(if_alias);
|
||||
ast = replace_ast;
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Only copy of one node is required here.
|
||||
/// But IAST has only method for deep copy of subtree.
|
||||
/// This can be a reason of performance degradation in case of deep queries.
|
||||
ASTPtr replace_ast_deep_copy = replace_ast->clone();
|
||||
replace_ast_deep_copy->setAlias(if_alias);
|
||||
ast = replace_ast_deep_copy;
|
||||
}
|
||||
|
||||
if (!function_node->arguments)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Wrong number of arguments for function 'if' (0 instead of 3)");
|
||||
|
||||
if (function_node->arguments->children.size() != 3)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Wrong number of arguments for function 'if' ({} instead of 3)",
|
||||
function_node->arguments->children.size());
|
||||
|
||||
visit(function_node->arguments);
|
||||
const auto * args = function_node->arguments->as<ASTExpressionList>();
|
||||
|
||||
ASTPtr condition_expr = args->children[0];
|
||||
ASTPtr then_expr = args->children[1];
|
||||
ASTPtr else_expr = args->children[2];
|
||||
|
||||
bool condition;
|
||||
if (tryExtractConstValueFromCondition(condition_expr, condition))
|
||||
if (!if_alias.empty())
|
||||
{
|
||||
ASTPtr replace_ast = condition ? then_expr : else_expr;
|
||||
ASTPtr child_copy = child;
|
||||
String replace_alias = replace_ast->tryGetAlias();
|
||||
String if_alias = child->tryGetAlias();
|
||||
|
||||
if (replace_alias.empty())
|
||||
{
|
||||
replace_ast->setAlias(if_alias);
|
||||
child = replace_ast;
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Only copy of one node is required here.
|
||||
/// But IAST has only method for deep copy of subtree.
|
||||
/// This can be a reason of performance degradation in case of deep queries.
|
||||
ASTPtr replace_ast_deep_copy = replace_ast->clone();
|
||||
replace_ast_deep_copy->setAlias(if_alias);
|
||||
child = replace_ast_deep_copy;
|
||||
}
|
||||
|
||||
if (!if_alias.empty())
|
||||
{
|
||||
auto alias_it = aliases.find(if_alias);
|
||||
if (alias_it != aliases.end() && alias_it->second.get() == child_copy.get())
|
||||
alias_it->second = child;
|
||||
}
|
||||
auto alias_it = aliases.find(if_alias);
|
||||
if (alias_it != aliases.end() && alias_it->second.get() == child_copy.get())
|
||||
alias_it->second = ast;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,23 +1,24 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/Aliases.h>
|
||||
#include <Interpreters/InDepthNodeVisitor.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// It removes Function_if node from AST if condition is constant.
|
||||
/// TODO: rewrite with InDepthNodeVisitor
|
||||
class OptimizeIfWithConstantConditionVisitor
|
||||
struct OptimizeIfWithConstantConditionVisitorData
|
||||
{
|
||||
public:
|
||||
explicit OptimizeIfWithConstantConditionVisitor(Aliases & aliases_)
|
||||
using TypeToVisit = ASTFunction;
|
||||
|
||||
explicit OptimizeIfWithConstantConditionVisitorData(Aliases & aliases_)
|
||||
: aliases(aliases_)
|
||||
{}
|
||||
|
||||
void visit(ASTPtr & ast);
|
||||
|
||||
void visit(ASTFunction & function_node, ASTPtr & ast);
|
||||
private:
|
||||
Aliases & aliases;
|
||||
};
|
||||
|
||||
/// It removes Function_if node from AST if condition is constant.
|
||||
using OptimizeIfWithConstantConditionVisitor = InDepthNodeVisitor<OneTypeMatcher<OptimizeIfWithConstantConditionVisitorData>, false>;
|
||||
|
||||
}
|
||||
|
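The header change converts the hand-rolled recursive visitor into a visitor-data pair driven by the generic `InDepthNodeVisitor`; the `TreeOptimizer` hunk that follows shows the new call site. Below is a rough standalone sketch of that pattern with a toy AST; the traversal is much simpler than the real template (assumed here: child-first order, a single matched node type), so treat it as an illustration of the structure only.

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Very small AST stand-in.
struct Node
{
    std::string kind;   // e.g. "function:if", "literal"
    std::vector<std::shared_ptr<Node>> children;
};
using NodePtr = std::shared_ptr<Node>;

// Analogue of the Data struct: holds state and a visit() for one node type.
struct ConstantIfData
{
    int rewrites = 0;

    void visit(Node & node, NodePtr & /*ast*/)
    {
        if (node.kind == "function:if")
            ++rewrites;   // the real code replaces the node with the taken branch
    }
};

// Analogue of InDepthNodeVisitor<OneTypeMatcher<Data>, false>: generic traversal,
// all type-specific logic lives in Data.
template <typename Data>
struct InDepthVisitor
{
    Data & data;

    void visit(NodePtr & ast)
    {
        for (auto & child : ast->children)
            visit(child);
        data.visit(*ast, ast);
    }
};

int main()
{
    auto root = std::make_shared<Node>(Node{"select", {
        std::make_shared<Node>(Node{"function:if", {}}),
        std::make_shared<Node>(Node{"literal", {}})}});

    ConstantIfData data;
    InDepthVisitor<ConstantIfData> visitor{data};
    visitor.visit(root);

    std::cout << "if() nodes visited: " << data.rewrites << "\n";
}
```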
@ -577,7 +577,8 @@ void TreeOptimizer::optimizeIf(ASTPtr & query, Aliases & aliases, bool if_chain_
|
||||
optimizeMultiIfToIf(query);
|
||||
|
||||
/// Optimize if with constant condition after constants was substituted instead of scalar subqueries.
|
||||
OptimizeIfWithConstantConditionVisitor(aliases).visit(query);
|
||||
OptimizeIfWithConstantConditionVisitorData visitor_data(aliases);
|
||||
OptimizeIfWithConstantConditionVisitor(visitor_data).visit(query);
|
||||
|
||||
if (if_chain_to_multiif)
|
||||
OptimizeIfChainsVisitor().visit(query);
|
||||
|
@ -15,6 +15,10 @@
|
||||
#include <Parsers/Access/ASTCreateUserQuery.h>
|
||||
#include <Parsers/Access/ASTDropAccessEntityQuery.h>
|
||||
#include <Parsers/Access/ASTGrantQuery.h>
|
||||
#include <Parsers/ASTCreateNamedCollectionQuery.h>
|
||||
#include <Parsers/ASTAlterNamedCollectionQuery.h>
|
||||
#include <Parsers/ASTDropNamedCollectionQuery.h>
|
||||
#include <Common/NamedCollections/NamedCollectionsFactory.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -38,6 +42,13 @@ static bool isAccessControlQuery(const ASTPtr & query)
|
||||
|| query->as<ASTGrantQuery>();
|
||||
}
|
||||
|
||||
static bool isNamedCollectionQuery(const ASTPtr & query)
|
||||
{
|
||||
return query->as<ASTCreateNamedCollectionQuery>()
|
||||
|| query->as<ASTDropNamedCollectionQuery>()
|
||||
|| query->as<ASTAlterNamedCollectionQuery>();
|
||||
}
|
||||
|
||||
ASTPtr removeOnClusterClauseIfNeeded(const ASTPtr & query, ContextPtr context, const WithoutOnClusterASTRewriteParams & params)
|
||||
{
|
||||
auto * query_on_cluster = dynamic_cast<ASTQueryWithOnCluster *>(query.get());
|
||||
@ -50,7 +61,10 @@ ASTPtr removeOnClusterClauseIfNeeded(const ASTPtr & query, ContextPtr context, c
|
||||
&& context->getUserDefinedSQLObjectsStorage().isReplicated())
|
||||
|| (isAccessControlQuery(query)
|
||||
&& context->getSettings().ignore_on_cluster_for_replicated_access_entities_queries
|
||||
&& context->getAccessControl().containsStorage(ReplicatedAccessStorage::STORAGE_TYPE)))
|
||||
&& context->getAccessControl().containsStorage(ReplicatedAccessStorage::STORAGE_TYPE))
|
||||
|| (isNamedCollectionQuery(query)
|
||||
&& context->getSettings().ignore_on_cluster_for_replicated_named_collections_queries
|
||||
&& NamedCollectionFactory::instance().usesReplicatedStorage()))
|
||||
{
|
||||
LOG_DEBUG(getLogger("removeOnClusterClauseIfNeeded"), "ON CLUSTER clause was ignored for query {}", query->getID());
|
||||
return query_on_cluster->getRewrittenASTWithoutOnCluster(params);
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnNullable.h>
|
||||
#include <Columns/ColumnArray.h>
|
||||
#include <Formats/FormatFactory.h>
|
||||
|
||||
#include <IO/ReadBufferFromFileBase.h>
|
||||
@ -30,6 +31,7 @@
|
||||
#include <DataTypes/DataTypeUUID.h>
|
||||
#include <DataTypes/DataTypesDecimal.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/NestedUtils.h>
|
||||
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
#include <parquet/file_reader.h>
|
||||
@ -111,7 +113,7 @@ struct DeltaLakeMetadataImpl
|
||||
std::set<String> result_files;
|
||||
NamesAndTypesList current_schema;
|
||||
DataLakePartitionColumns current_partition_columns;
|
||||
const auto checkpoint_version = getCheckpointIfExists(result_files);
|
||||
const auto checkpoint_version = getCheckpointIfExists(result_files, current_schema, current_partition_columns);
|
||||
|
||||
if (checkpoint_version)
|
||||
{
|
||||
@ -205,9 +207,32 @@ struct DeltaLakeMetadataImpl
|
||||
Poco::Dynamic::Var json = parser.parse(json_str);
|
||||
Poco::JSON::Object::Ptr object = json.extract<Poco::JSON::Object::Ptr>();
|
||||
|
||||
// std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
|
||||
// object->stringify(oss);
|
||||
// LOG_TEST(log, "Metadata: {}", oss.str());
|
||||
std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
|
||||
object->stringify(oss);
|
||||
LOG_TEST(log, "Metadata: {}", oss.str());
|
||||
|
||||
if (object->has("metaData"))
|
||||
{
|
||||
const auto metadata_object = object->get("metaData").extract<Poco::JSON::Object::Ptr>();
|
||||
const auto schema_object = metadata_object->getValue<String>("schemaString");
|
||||
|
||||
Poco::JSON::Parser p;
|
||||
Poco::Dynamic::Var fields_json = parser.parse(schema_object);
|
||||
const Poco::JSON::Object::Ptr & fields_object = fields_json.extract<Poco::JSON::Object::Ptr>();
|
||||
|
||||
auto current_schema = parseMetadata(fields_object);
|
||||
if (file_schema.empty())
|
||||
{
|
||||
file_schema = current_schema;
|
||||
}
|
||||
else if (file_schema != current_schema)
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
|
||||
"Reading from files with different schema is not possible "
|
||||
"({} is different from {})",
|
||||
file_schema.toString(), current_schema.toString());
|
||||
}
|
||||
}
|
||||
|
||||
if (object->has("add"))
|
||||
{
|
||||
@ -230,7 +255,12 @@ struct DeltaLakeMetadataImpl
|
||||
const auto value = partition_values->getValue<String>(partition_name);
|
||||
auto name_and_type = file_schema.tryGetByName(partition_name);
|
||||
if (!name_and_type)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "No such column in schema: {}", partition_name);
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"No such column in schema: {} (schema: {})",
|
||||
partition_name, file_schema.toNamesAndTypesDescription());
|
||||
}
|
||||
|
||||
auto field = getFieldValue(value, name_and_type->type);
|
||||
current_partition_columns.emplace_back(*name_and_type, field);
|
||||
@ -246,52 +276,35 @@ struct DeltaLakeMetadataImpl
|
||||
auto path = object->get("remove").extract<Poco::JSON::Object::Ptr>()->getValue<String>("path");
|
||||
result.erase(fs::path(configuration->getPath()) / path);
|
||||
}
|
||||
if (object->has("metaData"))
|
||||
{
|
||||
const auto metadata_object = object->get("metaData").extract<Poco::JSON::Object::Ptr>();
|
||||
const auto schema_object = metadata_object->getValue<String>("schemaString");
|
||||
|
||||
Poco::JSON::Parser p;
|
||||
Poco::Dynamic::Var fields_json = parser.parse(schema_object);
|
||||
Poco::JSON::Object::Ptr fields_object = fields_json.extract<Poco::JSON::Object::Ptr>();
|
||||
|
||||
const auto fields = fields_object->get("fields").extract<Poco::JSON::Array::Ptr>();
|
||||
NamesAndTypesList current_schema;
|
||||
for (size_t i = 0; i < fields->size(); ++i)
|
||||
{
|
||||
const auto field = fields->getObject(static_cast<UInt32>(i));
|
||||
auto column_name = field->getValue<String>("name");
|
||||
auto type = field->getValue<String>("type");
|
||||
auto is_nullable = field->getValue<bool>("nullable");
|
||||
|
||||
std::string physical_name;
|
||||
auto schema_metadata_object = field->get("metadata").extract<Poco::JSON::Object::Ptr>();
|
||||
if (schema_metadata_object->has("delta.columnMapping.physicalName"))
|
||||
physical_name = schema_metadata_object->getValue<String>("delta.columnMapping.physicalName");
|
||||
else
|
||||
physical_name = column_name;
|
||||
|
||||
LOG_TEST(log, "Found column: {}, type: {}, nullable: {}, physical name: {}",
|
||||
column_name, type, is_nullable, physical_name);
|
||||
|
||||
current_schema.push_back({physical_name, getFieldType(field, "type", is_nullable)});
|
||||
}
|
||||
|
||||
if (file_schema.empty())
|
||||
{
|
||||
file_schema = current_schema;
|
||||
}
|
||||
else if (file_schema != current_schema)
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
|
||||
"Reading from files with different schema is not possible "
|
||||
"({} is different from {})",
|
||||
file_schema.toString(), current_schema.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NamesAndTypesList parseMetadata(const Poco::JSON::Object::Ptr & metadata_json)
|
||||
{
|
||||
NamesAndTypesList schema;
|
||||
const auto fields = metadata_json->get("fields").extract<Poco::JSON::Array::Ptr>();
|
||||
for (size_t i = 0; i < fields->size(); ++i)
|
||||
{
|
||||
const auto field = fields->getObject(static_cast<UInt32>(i));
|
||||
auto column_name = field->getValue<String>("name");
|
||||
auto type = field->getValue<String>("type");
|
||||
auto is_nullable = field->getValue<bool>("nullable");
|
||||
|
||||
std::string physical_name;
|
||||
auto schema_metadata_object = field->get("metadata").extract<Poco::JSON::Object::Ptr>();
|
||||
if (schema_metadata_object->has("delta.columnMapping.physicalName"))
|
||||
physical_name = schema_metadata_object->getValue<String>("delta.columnMapping.physicalName");
|
||||
else
|
||||
physical_name = column_name;
|
||||
|
||||
LOG_TEST(log, "Found column: {}, type: {}, nullable: {}, physical name: {}",
|
||||
column_name, type, is_nullable, physical_name);
|
||||
|
||||
schema.push_back({physical_name, getFieldType(field, "type", is_nullable)});
|
||||
}
|
||||
return schema;
|
||||
}
|
||||
|
||||
DataTypePtr getFieldType(const Poco::JSON::Object::Ptr & field, const String & type_key, bool is_nullable)
|
||||
{
|
||||
if (field->isObject(type_key))
|
||||
@ -505,7 +518,10 @@ struct DeltaLakeMetadataImpl
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Arrow error: {}", _s.ToString()); \
|
||||
} while (false)
|
||||
|
||||
size_t getCheckpointIfExists(std::set<String> & result)
|
||||
size_t getCheckpointIfExists(
|
||||
std::set<String> & result,
|
||||
NamesAndTypesList & file_schema,
|
||||
DataLakePartitionColumns & file_partition_columns)
|
||||
{
|
||||
const auto version = readLastCheckpointIfExists();
|
||||
if (!version)
|
||||
@ -526,7 +542,8 @@ struct DeltaLakeMetadataImpl
|
||||
auto columns = ParquetSchemaReader(*buf, format_settings).readSchema();
|
||||
|
||||
/// Read only columns that we need.
|
||||
columns.filterColumns(NameSet{"add", "remove"});
|
||||
auto filter_column_names = NameSet{"add", "metaData"};
|
||||
columns.filterColumns(filter_column_names);
|
||||
Block header;
|
||||
for (const auto & column : columns)
|
||||
header.insert({column.type->createColumn(), column.type, column.name});
|
||||
@ -540,9 +557,6 @@ struct DeltaLakeMetadataImpl
|
||||
ArrowMemoryPool::instance(),
|
||||
&reader));
|
||||
|
||||
std::shared_ptr<arrow::Schema> file_schema;
|
||||
THROW_ARROW_NOT_OK(reader->GetSchema(&file_schema));
|
||||
|
||||
ArrowColumnToCHColumn column_reader(
|
||||
header, "Parquet",
|
||||
format_settings.parquet.allow_missing_columns,
|
||||
@ -553,29 +567,85 @@ struct DeltaLakeMetadataImpl
|
||||
std::shared_ptr<arrow::Table> table;
|
||||
THROW_ARROW_NOT_OK(reader->ReadTable(&table));
|
||||
|
||||
Chunk res = column_reader.arrowTableToCHChunk(table, reader->parquet_reader()->metadata()->num_rows());
|
||||
const auto & res_columns = res.getColumns();
|
||||
Chunk chunk = column_reader.arrowTableToCHChunk(table, reader->parquet_reader()->metadata()->num_rows());
|
||||
auto res_block = header.cloneWithColumns(chunk.detachColumns());
|
||||
res_block = Nested::flatten(res_block);
|
||||
|
||||
if (res_columns.size() != 2)
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::INCORRECT_DATA,
|
||||
"Unexpected number of columns: {} (having: {}, expected: {})",
|
||||
res_columns.size(), res.dumpStructure(), header.dumpStructure());
|
||||
}
|
||||
const auto * nullable_path_column = assert_cast<const ColumnNullable *>(res_block.getByName("add.path").column.get());
|
||||
const auto & path_column = assert_cast<const ColumnString &>(nullable_path_column->getNestedColumn());
|
||||
|
||||
const auto * nullable_schema_column = assert_cast<const ColumnNullable *>(res_block.getByName("metaData.schemaString").column.get());
|
||||
const auto & schema_column = assert_cast<const ColumnString &>(nullable_schema_column->getNestedColumn());
|
||||
|
||||
auto partition_values_column_raw = res_block.getByName("add.partitionValues").column;
|
||||
const auto & partition_values_column = assert_cast<const ColumnMap &>(*partition_values_column_raw);
|
||||
|
||||
const auto * tuple_column = assert_cast<const ColumnTuple *>(res_columns[0].get());
|
||||
const auto & nullable_column = assert_cast<const ColumnNullable &>(tuple_column->getColumn(0));
|
||||
const auto & path_column = assert_cast<const ColumnString &>(nullable_column.getNestedColumn());
|
||||
for (size_t i = 0; i < path_column.size(); ++i)
|
||||
{
|
||||
const auto filename = String(path_column.getDataAt(i));
|
||||
if (filename.empty())
|
||||
const auto metadata = String(schema_column.getDataAt(i));
|
||||
if (!metadata.empty())
|
||||
{
|
||||
Poco::JSON::Parser parser;
|
||||
Poco::Dynamic::Var json = parser.parse(metadata);
|
||||
const Poco::JSON::Object::Ptr & object = json.extract<Poco::JSON::Object::Ptr>();
|
||||
|
||||
auto current_schema = parseMetadata(object);
|
||||
if (file_schema.empty())
|
||||
{
|
||||
file_schema = current_schema;
|
||||
LOG_TEST(log, "Processed schema from checkpoint: {}", file_schema.toString());
|
||||
}
|
||||
else if (file_schema != current_schema)
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
|
||||
"Reading from files with different schema is not possible "
|
||||
"({} is different from {})",
|
||||
file_schema.toString(), current_schema.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < path_column.size(); ++i)
|
||||
{
|
||||
const auto path = String(path_column.getDataAt(i));
|
||||
if (path.empty())
|
||||
continue;
|
||||
LOG_TEST(log, "Adding {}", filename);
|
||||
const auto [_, inserted] = result.insert(std::filesystem::path(configuration->getPath()) / filename);
|
||||
|
||||
auto filename = fs::path(path).filename().string();
|
||||
auto it = file_partition_columns.find(filename);
|
||||
if (it == file_partition_columns.end())
|
||||
{
|
||||
Field map;
|
||||
partition_values_column.get(i, map);
|
||||
auto partition_values_map = map.safeGet<Map>();
|
||||
if (!partition_values_map.empty())
|
||||
{
|
||||
auto & current_partition_columns = file_partition_columns[filename];
|
||||
for (const auto & map_value : partition_values_map)
|
||||
{
|
||||
const auto tuple = map_value.safeGet<Tuple>();
|
||||
const auto partition_name = tuple[0].safeGet<String>();
|
||||
auto name_and_type = file_schema.tryGetByName(partition_name);
|
||||
if (!name_and_type)
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"No such column in schema: {} (schema: {})",
|
||||
partition_name, file_schema.toString());
|
||||
}
|
||||
const auto value = tuple[1].safeGet<String>();
|
||||
auto field = getFieldValue(value, name_and_type->type);
|
||||
current_partition_columns.emplace_back(std::move(name_and_type.value()), std::move(field));
|
||||
|
||||
LOG_TEST(log, "Partition {} value is {} (for {})", partition_name, value, filename);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LOG_TEST(log, "Adding {}", path);
|
||||
const auto [_, inserted] = result.insert(std::filesystem::path(configuration->getPath()) / path);
|
||||
if (!inserted)
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "File already exists {}", filename);
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "File already exists {}", path);
|
||||
}
|
||||
|
||||
return version;
|
||||
|
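The refactor extracts schema parsing into `parseMetadata()` and reads the Delta Lake `schemaString` (including the `delta.columnMapping.physicalName` override) both from JSON commit logs and from checkpoint files. A standalone sketch of parsing such a schema string with Poco JSON follows, assuming Poco is available; the field layout mirrors the snippet above, but the example schema string itself is made up.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

#include <Poco/JSON/Array.h>
#include <Poco/JSON/Object.h>
#include <Poco/JSON/Parser.h>

struct Column { std::string name; std::string type; bool nullable = false; };

// Parse a Delta Lake "schemaString" payload the way the refactored parseMetadata()
// does: iterate "fields", prefer the physical name from column-mapping metadata.
std::vector<Column> parseSchemaString(const std::string & schema_string)
{
    std::vector<Column> schema;

    Poco::JSON::Parser parser;
    Poco::Dynamic::Var json = parser.parse(schema_string);
    Poco::JSON::Object::Ptr object = json.extract<Poco::JSON::Object::Ptr>();

    Poco::JSON::Array::Ptr fields = object->get("fields").extract<Poco::JSON::Array::Ptr>();
    for (size_t i = 0; i < fields->size(); ++i)
    {
        Poco::JSON::Object::Ptr field = fields->getObject(static_cast<unsigned>(i));

        std::string name = field->getValue<std::string>("name");
        std::string type = field->getValue<std::string>("type");
        bool is_nullable = field->getValue<bool>("nullable");

        Poco::JSON::Object::Ptr metadata = field->get("metadata").extract<Poco::JSON::Object::Ptr>();
        if (metadata && metadata->has("delta.columnMapping.physicalName"))
            name = metadata->getValue<std::string>("delta.columnMapping.physicalName");

        schema.push_back({name, type, is_nullable});
    }
    return schema;
}

int main()
{
    const std::string schema_string = R"({"type":"struct","fields":[
        {"name":"id","type":"long","nullable":false,"metadata":{}},
        {"name":"value","type":"string","nullable":true,
         "metadata":{"delta.columnMapping.physicalName":"col-123"}}]})";

    for (const auto & column : parseSchemaString(schema_string))
        std::cout << column.name << " : " << column.type
                  << (column.nullable ? " NULL" : " NOT NULL") << "\n";
}
```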
@ -41,6 +41,7 @@ public:
|
||||
auto object_storage = base_configuration->createObjectStorage(context, /* is_readonly */true);
|
||||
DataLakeMetadataPtr metadata;
|
||||
NamesAndTypesList schema_from_metadata;
|
||||
const bool use_schema_from_metadata = columns_.empty();
|
||||
|
||||
if (base_configuration->format == "auto")
|
||||
base_configuration->format = "Parquet";
|
||||
@ -50,8 +51,9 @@ public:
|
||||
try
|
||||
{
|
||||
metadata = DataLakeMetadata::create(object_storage, base_configuration, context);
|
||||
schema_from_metadata = metadata->getTableSchema();
|
||||
configuration->setPaths(metadata->getDataFiles());
|
||||
if (use_schema_from_metadata)
|
||||
schema_from_metadata = metadata->getTableSchema();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -66,7 +68,7 @@ public:
|
||||
return std::make_shared<IStorageDataLake<DataLakeMetadata>>(
|
||||
base_configuration, std::move(metadata), configuration, object_storage,
|
||||
context, table_id_,
|
||||
columns_.empty() ? ColumnsDescription(schema_from_metadata) : columns_,
|
||||
use_schema_from_metadata ? ColumnsDescription(schema_from_metadata) : columns_,
|
||||
constraints_, comment_, format_settings_);
|
||||
}
|
||||
|
||||
|
@ -206,23 +206,25 @@ Chunk StorageObjectStorageSource::generate()
|
||||
if (!partition_columns.empty() && chunk_size && chunk.hasColumns())
|
||||
{
|
||||
auto partition_values = partition_columns.find(filename);
|
||||
|
||||
for (const auto & [name_and_type, value] : partition_values->second)
|
||||
if (partition_values != partition_columns.end())
|
||||
{
|
||||
if (!read_from_format_info.source_header.has(name_and_type.name))
|
||||
continue;
|
||||
for (const auto & [name_and_type, value] : partition_values->second)
|
||||
{
|
||||
if (!read_from_format_info.source_header.has(name_and_type.name))
|
||||
continue;
|
||||
|
||||
const auto column_pos = read_from_format_info.source_header.getPositionByName(name_and_type.name);
|
||||
auto partition_column = name_and_type.type->createColumnConst(chunk.getNumRows(), value)->convertToFullColumnIfConst();
|
||||
const auto column_pos = read_from_format_info.source_header.getPositionByName(name_and_type.name);
|
||||
auto partition_column = name_and_type.type->createColumnConst(chunk.getNumRows(), value)->convertToFullColumnIfConst();
|
||||
|
||||
/// This column is filled with default value now, remove it.
|
||||
chunk.erase(column_pos);
|
||||
/// This column is filled with default value now, remove it.
|
||||
chunk.erase(column_pos);
|
||||
|
||||
/// Add correct values.
|
||||
if (chunk.hasColumns())
|
||||
chunk.addColumn(column_pos, std::move(partition_column));
|
||||
else
|
||||
chunk.addColumn(std::move(partition_column));
|
||||
/// Add correct values.
|
||||
if (column_pos < chunk.getNumColumns())
|
||||
chunk.addColumn(column_pos, std::move(partition_column));
|
||||
else
|
||||
chunk.addColumn(std::move(partition_column));
|
||||
}
|
||||
}
|
||||
}
|
||||
return chunk;
|
||||
|
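The `generate()` hunk adds two guards: the partition-values lookup is checked against `end()`, and the partition column is re-inserted at its original position only when that position is still inside the chunk, otherwise appended. A small standalone sketch of the replace-or-append step, using plain `std::vector` columns rather than ClickHouse's `Chunk`:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Column = std::vector<std::string>;

// Replace the column at `position` with `values`, or append it if the position is
// past the end - the same bounds check the hunk adds for partition columns.
void replaceOrAppendColumn(std::vector<Column> & chunk, size_t position, Column values)
{
    if (position < chunk.size())
        chunk.erase(chunk.begin() + position);   // drop the default-filled placeholder

    if (position < chunk.size())
        chunk.insert(chunk.begin() + position, std::move(values));
    else
        chunk.push_back(std::move(values));
}

int main()
{
    std::map<std::string, std::string> partition_values = {{"date", "2024-07-01"}};
    std::vector<Column> chunk = {{"1", "2"}, {"", ""}};   // second column is a default-filled placeholder

    // Guarded lookup, mirroring the added `!= end()` check.
    auto it = partition_values.find("date");
    if (it != partition_values.end())
    {
        Column partition_column(chunk.front().size(), it->second);
        replaceOrAppendColumn(chunk, 1, std::move(partition_column));
    }

    for (const auto & value : chunk[1])
        std::cout << value << "\n";   // partition value printed per row
}
```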
@ -5,20 +5,21 @@
|
||||
|
||||
#include <base/hex.h>
|
||||
#include <base/interpolate.h>
|
||||
#include <Common/FailPoint.h>
|
||||
#include <Common/Macros.h>
|
||||
#include <Common/MemoryTracker.h>
|
||||
#include <Common/ProfileEventsScope.h>
|
||||
#include <Common/StringUtils.h>
|
||||
#include <Common/ThreadFuzzer.h>
|
||||
#include <Common/ZooKeeper/KeeperException.h>
|
||||
#include <Common/ZooKeeper/Types.h>
|
||||
#include <Common/escapeForFileName.h>
|
||||
#include <Common/formatReadable.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/noexcept_scope.h>
|
||||
#include <Common/randomDelay.h>
|
||||
#include <Common/thread_local_rng.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/ThreadFuzzer.h>
|
||||
#include <Common/FailPoint.h>
|
||||
#include <Common/randomDelay.h>
|
||||
|
||||
#include <Core/ServerUUID.h>
|
||||
|
||||
@ -5272,6 +5273,8 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown()
|
||||
if (shutdown_prepared_called.exchange(true))
|
||||
return;
|
||||
|
||||
LOG_TRACE(log, "Start preparing for shutdown");
|
||||
|
||||
try
|
||||
{
|
||||
auto settings_ptr = getSettings();
|
||||
@ -5282,7 +5285,11 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown()
|
||||
stopBeingLeader();
|
||||
|
||||
if (attach_thread)
|
||||
{
|
||||
attach_thread->shutdown();
|
||||
LOG_TRACE(log, "The attach thread is shutdown");
|
||||
}
|
||||
|
||||
|
||||
restarting_thread.shutdown(/* part_of_full_shutdown */true);
|
||||
/// Explicitly set the event, because the restarting thread will not set it again
|
||||
@ -5295,6 +5302,8 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown()
|
||||
shutdown_deadline.emplace(std::chrono::system_clock::now());
|
||||
throw;
|
||||
}
|
||||
|
||||
LOG_TRACE(log, "Finished preparing for shutdown");
|
||||
}
|
||||
|
||||
void StorageReplicatedMergeTree::partialShutdown()
|
||||
@ -5332,6 +5341,8 @@ void StorageReplicatedMergeTree::shutdown(bool)
|
||||
if (shutdown_called.exchange(true))
|
||||
return;
|
||||
|
||||
LOG_TRACE(log, "Shutdown started");
|
||||
|
||||
flushAndPrepareForShutdown();
|
||||
|
||||
if (!shutdown_deadline.has_value())
|
||||
@ -5374,6 +5385,7 @@ void StorageReplicatedMergeTree::shutdown(bool)
|
||||
/// Wait for all of them
|
||||
std::lock_guard lock(data_parts_exchange_ptr->rwlock);
|
||||
}
|
||||
LOG_TRACE(log, "Shutdown finished");
|
||||
}
|
||||
|
||||
|
||||
|
@ -35,7 +35,6 @@ void registerStorageFuzzJSON(StorageFactory & factory);
|
||||
void registerStorageS3(StorageFactory & factory);
|
||||
void registerStorageHudi(StorageFactory & factory);
|
||||
void registerStorageS3Queue(StorageFactory & factory);
|
||||
void registerStorageAzureQueue(StorageFactory & factory);
|
||||
|
||||
#if USE_PARQUET
|
||||
void registerStorageDeltaLake(StorageFactory & factory);
|
||||
@ -45,6 +44,10 @@ void registerStorageIceberg(StorageFactory & factory);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if USE_AZURE_BLOB_STORAGE
|
||||
void registerStorageAzureQueue(StorageFactory & factory);
|
||||
#endif
|
||||
|
||||
#if USE_HDFS
|
||||
#if USE_HIVE
|
||||
void registerStorageHive(StorageFactory & factory);
|
||||
|
@ -15,3 +15,4 @@ warn_return_any = True
no_implicit_reexport = True
strict_equality = True
extra_checks = True
ignore_missing_imports = True
@ -15,7 +15,7 @@ import upload_result_helper
|
||||
from build_check import get_release_or_pr
|
||||
from ci_config import CI
|
||||
from ci_metadata import CiMetadata
|
||||
from ci_utils import GHActions, normalize_string
|
||||
from ci_utils import GHActions, normalize_string, Shell
|
||||
from clickhouse_helper import (
|
||||
CiLogsCredentials,
|
||||
ClickHouseHelper,
|
||||
@ -53,6 +53,7 @@ from stopwatch import Stopwatch
|
||||
from tee_popen import TeePopen
|
||||
from ci_cache import CiCache
|
||||
from ci_settings import CiSettings
|
||||
from ci_buddy import CIBuddy
|
||||
from version_helper import get_version_from_repo
|
||||
|
||||
# pylint: disable=too-many-lines
|
||||
@ -262,6 +263,8 @@ def check_missing_images_on_dockerhub(
|
||||
|
||||
|
||||
def _pre_action(s3, indata, pr_info):
|
||||
print("Clear dmesg")
|
||||
Shell.run("sudo dmesg --clear ||:")
|
||||
CommitStatusData.cleanup()
|
||||
JobReport.cleanup()
|
||||
BuildResult.cleanup()
|
||||
@ -1118,6 +1121,14 @@ def main() -> int:
|
||||
|
||||
### POST action: start
|
||||
elif args.post:
|
||||
if Shell.check(
|
||||
"sudo dmesg -T | grep -q -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE'"
|
||||
):
|
||||
print("WARNING: OOM while job execution")
|
||||
CIBuddy(dry_run=not pr_info.is_release).post_error(
|
||||
"Out Of Memory", job_name=_get_ext_check_name(args.job_name)
|
||||
)
|
||||
|
||||
job_report = JobReport.load() if JobReport.exist() else None
|
||||
if job_report:
|
||||
ch_helper = ClickHouseHelper()
|
||||
|

88  tests/ci/ci_buddy.py  Normal file
@ -0,0 +1,88 @@
import json
import os

import boto3
import requests
from botocore.exceptions import ClientError

from pr_info import PRInfo
from ci_utils import Shell


class CIBuddy:
    _HEADERS = {"Content-Type": "application/json"}

    def __init__(self, dry_run=False):
        self.repo = os.getenv("GITHUB_REPOSITORY", "")
        self.dry_run = dry_run
        res = self._get_webhooks()
        self.test_channel = ""
        self.dev_ci_channel = ""
        if res:
            self.test_channel = json.loads(res)["test_channel"]
            self.dev_ci_channel = json.loads(res)["ci_channel"]
        self.job_name = os.getenv("CHECK_NAME", "unknown")
        pr_info = PRInfo()
        self.pr_number = pr_info.number
        self.head_ref = pr_info.head_ref
        self.commit_url = pr_info.commit_html_url

    @staticmethod
    def _get_webhooks():
        name = "ci_buddy_web_hooks"

        session = boto3.Session(region_name="us-east-1")  # Replace with your region
        ssm_client = session.client("ssm")
        json_string = None
        try:
            response = ssm_client.get_parameter(
                Name=name,
                WithDecryption=True,  # Set to True if the parameter is a SecureString
            )
            json_string = response["Parameter"]["Value"]
        except ClientError as e:
            print(f"An error occurred: {e}")

        return json_string

    def post(self, message, dry_run=None):
        if dry_run is None:
            dry_run = self.dry_run
        print(f"Posting slack message, dry_run [{dry_run}]")
        if dry_run:
            url = self.test_channel
        else:
            url = self.dev_ci_channel
        data = {"text": message}
        try:
            requests.post(url, headers=self._HEADERS, data=json.dumps(data), timeout=10)
        except Exception as e:
            print(f"ERROR: Failed to post message, ex {e}")

    def post_error(self, error_description, job_name="", with_instance_info=True):
        instance_id, instance_type = "unknown", "unknown"
        if with_instance_info:
            instance_id = Shell.run("ec2metadata --instance-id") or instance_id
            instance_type = Shell.run("ec2metadata --instance-type") or instance_type
        if not job_name:
            job_name = os.getenv("CHECK_NAME", "unknown")
        line_err = f":red_circle: *Error: {error_description}*\n\n"
        line_ghr = f"   *Runner:* `{instance_type}`, `{instance_id}`\n"
        line_job = f"   *Job:* `{job_name}`\n"
        line_pr_ = f"   *PR:* <https://github.com/{self.repo}/pull/{self.pr_number}|#{self.pr_number}>\n"
        line_br_ = f"   *Branch:* `{self.head_ref}`, <{self.commit_url}|commit>\n"
        message = line_err
        message += line_job
        if with_instance_info:
            message += line_ghr
        if self.pr_number > 0:
            message += line_pr_
        else:
            message += line_br_
        self.post(message)


if __name__ == "__main__":
    # test
    buddy = CIBuddy(dry_run=True)
    buddy.post_error("Out of memory")

@ -1,4 +1,5 @@
import os
import subprocess
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Iterator, List, Union

@ -42,3 +43,43 @@ class GHActions:
        for line in lines:
            print(line)
        print("::endgroup::")


class Shell:
    @classmethod
    def run_strict(cls, command):
        subprocess.run(
            command + " 2>&1",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=True,
        )

    @classmethod
    def run(cls, command):
        res = ""
        result = subprocess.run(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=False,
        )
        if result.returncode == 0:
            res = result.stdout
        return res.strip()

    @classmethod
    def check(cls, command):
        result = subprocess.run(
            command + " 2>&1",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=False,
        )
        return result.returncode == 0

@ -9,4 +9,21 @@
            <key1>value1</key1>
        </collection1>
    </named_collections>

    <remote_servers>
        <replicated_nc_nodes_cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>node_with_keeper</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node_with_keeper_2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <allow_distributed_ddl_queries>true</allow_distributed_ddl_queries>
        </replicated_nc_nodes_cluster>
    </remote_servers>
</clickhouse>

@ -1,4 +1,9 @@
<clickhouse>
    <profiles>
        <default>
            <ignore_on_cluster_for_replicated_named_collections_queries>0</ignore_on_cluster_for_replicated_named_collections_queries>
        </default>
    </profiles>
    <users>
        <default>
            <password></password>

@ -3,6 +3,8 @@ import pytest
import os
import time
from helpers.cluster import ClickHouseCluster
from contextlib import nullcontext as does_not_raise
from helpers.client import QueryRuntimeException

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
NAMED_COLLECTIONS_CONFIG = os.path.join(

@ -761,3 +763,32 @@ def test_keeper_storage(cluster):

    check_dropped(node1)
    check_dropped(node2)


@pytest.mark.parametrize(
    "ignore, expected_raise",
    [(True, does_not_raise()), (False, pytest.raises(QueryRuntimeException))],
)
def test_keeper_storage_remove_on_cluster(cluster, ignore, expected_raise):
    node = cluster.instances["node_with_keeper"]

    replace_in_users_config(
        node,
        "ignore_on_cluster_for_replicated_named_collections_queries>.",
        f"ignore_on_cluster_for_replicated_named_collections_queries>{int(ignore)}",
    )
    node.query("SYSTEM RELOAD CONFIG")

    with expected_raise:
        node.query(
            "DROP NAMED COLLECTION IF EXISTS test_nc ON CLUSTER `replicated_nc_nodes_cluster`"
        )
        node.query(
            f"CREATE NAMED COLLECTION test_nc ON CLUSTER `replicated_nc_nodes_cluster` AS key1=1, key2=2 OVERRIDABLE"
        )
        node.query(
            f"ALTER NAMED COLLECTION test_nc ON CLUSTER `replicated_nc_nodes_cluster` SET key2=3"
        )
        node.query(
            f"DROP NAMED COLLECTION test_nc ON CLUSTER `replicated_nc_nodes_cluster`"
        )

@ -596,19 +596,116 @@ def test_partition_columns(started_cluster):
    )
    assert result == 1

    # instance.query(
    #     f"""
    #     DROP TABLE IF EXISTS {TABLE_NAME};
    #     CREATE TABLE {TABLE_NAME} (a Int32, b String, c DateTime)
    #     ENGINE=DeltaLake('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{result_file}/', 'minio', 'minio123')"""
    # )
    # assert (
    #     int(
    #         instance.query(
    #             f"SELECT count() FROM {TABLE_NAME} WHERE c != toDateTime('2000/01/05')"
    #         )
    #     )
    #     == num_rows - 1
    # )
    # instance.query(f"SELECT a, b, c, FROM {TABLE_NAME}")
    # assert False
    instance.query(
        f"""
        DROP TABLE IF EXISTS {TABLE_NAME};
        CREATE TABLE {TABLE_NAME} (a Nullable(Int32), b Nullable(String), c Nullable(Date32), d Nullable(Int32), e Nullable(Bool))
        ENGINE=DeltaLake('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{result_file}/', 'minio', 'minio123')"""
    )
    assert (
        """1 test1 2000-01-01 1 false
2 test2 2000-01-02 2 false
3 test3 2000-01-03 3 false
4 test4 2000-01-04 4 false
5 test5 2000-01-05 5 false
6 test6 2000-01-06 6 false
7 test7 2000-01-07 7 false
8 test8 2000-01-08 8 false
9 test9 2000-01-09 9 false"""
        == instance.query(f"SELECT * FROM {TABLE_NAME} ORDER BY b").strip()
    )

    assert (
        int(
            instance.query(
                f"SELECT count() FROM {TABLE_NAME} WHERE c == toDateTime('2000/01/05')"
            )
        )
        == 1
    )

    # Subset of columns should work.
    instance.query(
        f"""
        DROP TABLE IF EXISTS {TABLE_NAME};
        CREATE TABLE {TABLE_NAME} (b Nullable(String), c Nullable(Date32), d Nullable(Int32))
        ENGINE=DeltaLake('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{result_file}/', 'minio', 'minio123')"""
    )
    assert (
        """test1 2000-01-01 1
test2 2000-01-02 2
test3 2000-01-03 3
test4 2000-01-04 4
test5 2000-01-05 5
test6 2000-01-06 6
test7 2000-01-07 7
test8 2000-01-08 8
test9 2000-01-09 9"""
        == instance.query(f"SELECT * FROM {TABLE_NAME} ORDER BY b").strip()
    )

    for i in range(num_rows + 1, 2 * num_rows + 1):
        data = [
            (
                i,
                "test" + str(i),
                datetime.strptime(f"2000-01-{i}", "%Y-%m-%d"),
                i,
                False,
            )
        ]
        df = spark.createDataFrame(data=data, schema=schema)
        df.printSchema()
        df.write.mode("append").format("delta").partitionBy(partition_columns).save(
            f"/{TABLE_NAME}"
        )

    files = upload_directory(minio_client, bucket, f"/{TABLE_NAME}", "")
    ok = False
    for file in files:
        if file.endswith("last_checkpoint"):
            ok = True
    assert ok

    result = int(
        instance.query(
            f"""SELECT count()
            FROM deltaLake('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{result_file}/', 'minio', 'minio123')
            """
        )
    )
    assert result == num_rows * 2

    assert (
        """1 test1 2000-01-01 1 false
2 test2 2000-01-02 2 false
3 test3 2000-01-03 3 false
4 test4 2000-01-04 4 false
5 test5 2000-01-05 5 false
6 test6 2000-01-06 6 false
7 test7 2000-01-07 7 false
8 test8 2000-01-08 8 false
9 test9 2000-01-09 9 false
10 test10 2000-01-10 10 false
11 test11 2000-01-11 11 false
12 test12 2000-01-12 12 false
13 test13 2000-01-13 13 false
14 test14 2000-01-14 14 false
15 test15 2000-01-15 15 false
16 test16 2000-01-16 16 false
17 test17 2000-01-17 17 false
18 test18 2000-01-18 18 false"""
        == instance.query(
            f"""
            SELECT * FROM deltaLake('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{result_file}/', 'minio', 'minio123') ORDER BY c
            """
        ).strip()
    )
    assert (
        int(
            instance.query(
                f"SELECT count() FROM {TABLE_NAME} WHERE c == toDateTime('2000/01/15')"
            )
        )
        == 1
    )

@ -78,13 +78,13 @@ def wait_rabbitmq_to_start(rabbitmq_docker_id, cookie, timeout=180):

def kill_rabbitmq(rabbitmq_id):
    p = subprocess.Popen(("docker", "stop", rabbitmq_id), stdout=subprocess.PIPE)
    p.communicate()
    p.wait(timeout=60)
    return p.returncode == 0


def revive_rabbitmq(rabbitmq_id, cookie):
    p = subprocess.Popen(("docker", "start", rabbitmq_id), stdout=subprocess.PIPE)
    p.communicate()
    p.wait(timeout=60)
    wait_rabbitmq_to_start(rabbitmq_id, cookie)

@ -1 +1,2 @@
42
42

@ -17,3 +17,27 @@ ENGINE = MergeTree ORDER BY conversation;
INSERT INTO t2(conversation) VALUES (42);

select * from t2;

drop table t1;

INSERT INTO t2(conversation) VALUES (42); -- { serverError UNKNOWN_TABLE }

drop table t2;

CREATE TABLE t2 (
    `conversation` UInt64,
    CONSTRAINT constraint_conversation CHECK conversation IN (SELECT id FROM t1)
)
ENGINE = MergeTree ORDER BY conversation;

INSERT INTO t2(conversation) VALUES (42); -- { serverError UNKNOWN_TABLE }

CREATE TABLE t1 (
    `id` UInt64
)
ENGINE = MergeTree ORDER BY id;

INSERT INTO t1(id) VALUES (42);

INSERT INTO t2(conversation) VALUES (42);
select * from t2;

@ -0,0 +1,21 @@
2020-01-01
2020-01-01
2020-01-01 00:00:00
2020-01-01 00:00:00.000000
127.0.0.1
2001:db8:85a3::8a2e:370:7334
42
42
42
42
42
42
42
42
42
42
Hello
Hello
\0\0\0
Hello\0\0\0\0\0
5801c962-1182-458a-89f8-d077da5074f9

29  tests/queries/0_stateless/03198_json_extract_more_types.sql  Normal file
@ -0,0 +1,29 @@
set allow_suspicious_low_cardinality_types=1;

select JSONExtract('{"a" : "2020-01-01"}', 'a', 'Date');
select JSONExtract('{"a" : "2020-01-01"}', 'a', 'Date32');
select JSONExtract('{"a" : "2020-01-01 00:00:00"}', 'a', 'DateTime');
select JSONExtract('{"a" : "2020-01-01 00:00:00.000000"}', 'a', 'DateTime64(6)');
select JSONExtract('{"a" : "127.0.0.1"}', 'a', 'IPv4');
select JSONExtract('{"a" : "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}', 'a', 'IPv6');


select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt8)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int8)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt16)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int16)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt32)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int32)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt64)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int64)');

select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Float32)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Float32)');

select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(String)');
select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(5))');
select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(3))');
select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(10))');

select JSONExtract('{"a" : "5801c962-1182-458a-89f8-d077da5074f9"}', 'a', 'LowCardinality(UUID)');

@ -0,0 +1,6 @@
SET allow_experimental_analyzer = 1;

DROP TABLE IF EXISTS table_with_materialized;
CREATE TABLE table_with_materialized (col String MATERIALIZED 'A') ENGINE = Memory;
SELECT number FROM numbers(1) AS n, table_with_materialized;
DROP TABLE table_with_materialized;

@ -0,0 +1,30 @@
true Bool
42 Int64
-42 Int64
18446744073709551615 UInt64
42.42 Float64
42 Int64
-42 Int64
18446744073709551615 UInt64
Hello String
2020-01-01 Date
2020-01-01 00:00:00.000000000 DateTime64(9)
[1,2,3] Array(Nullable(Int64))
['str1','str2','str3'] Array(Nullable(String))
[[[1],[2,3,4]],[[5,6],[7]]] Array(Array(Array(Nullable(Int64))))
['2020-01-01 00:00:00.000000000','2020-01-01 00:00:00.000000000'] Array(Nullable(DateTime64(9)))
['2020-01-01','2020-01-01 date'] Array(Nullable(String))
['2020-01-01','2020-01-01 00:00:00','str'] Array(Nullable(String))
['2020-01-01','2020-01-01 00:00:00','42'] Array(Nullable(String))
['str','42'] Array(Nullable(String))
[42,42.42] Array(Nullable(Float64))
[42,18446744073709552000,42.42] Array(Nullable(Float64))
[42,42.42] Array(Nullable(Float64))
[NULL,NULL] Array(Nullable(String))
[NULL,42] Array(Nullable(Int64))
[[NULL],[],[42]] Array(Array(Nullable(Int64)))
[[],[NULL,NULL],[1,NULL,3],[NULL,2,NULL]] Array(Array(Nullable(Int64)))
[[],[NULL,NULL],['1',NULL,'3'],[NULL,'2',NULL],['2020-01-01']] Array(Array(Nullable(String)))
('str',42,[42]) Tuple(Nullable(String), Nullable(Int64), Array(Nullable(Int64)))
[42,18446744073709551615] Array(Nullable(UInt64))
(-42,18446744073709551615) Tuple(Nullable(Int64), Nullable(UInt64))

37  tests/queries/0_stateless/03199_json_extract_dynamic.sql  Normal file
@ -0,0 +1,37 @@
set input_format_json_try_infer_numbers_from_strings=1;

select JSONExtract(materialize('{"d" : true}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : 42}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : -42}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : 18446744073709551615}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : 42.42}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "42"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "-42"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "18446744073709551615"}'), 'd', 'Dynamic') as d, dynamicType(d);

select JSONExtract(materialize('{"d" : "Hello"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "2020-01-01"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "2020-01-01 00:00:00.000"}'), 'd', 'Dynamic') as d, dynamicType(d);

select JSONExtract(materialize('{"d" : [1, 2, 3]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["str1", "str2", "str3"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [[[1], [2, 3, 4]], [[5, 6], [7]]]}'), 'd', 'Dynamic') as d, dynamicType(d);

select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 date"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00", "str"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00", "42"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["str", "42"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 18446744073709551615, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d);

select JSONExtract(materialize('{"d" : [null, null]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [null, 42]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [[null], [], [42]]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"a" : [[], [null, null], ["1", null, "3"], [null, "2", null]]}'), 'a', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"a" : [[], [null, null], ["1", null, "3"], [null, "2", null], ["2020-01-01"]]}'), 'a', 'Dynamic') as d, dynamicType(d);

select JSONExtract(materialize('{"d" : ["str", 42, [42]]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 18446744073709551615]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [-42, 18446744073709551615]}'), 'd', 'Dynamic') as d, dynamicType(d);

@ -1900,11 +1900,13 @@ kurtosis
kurtpop
kurtsamp
laion
lagInFrame
lang
laravel
largestTriangleThreeBuckets
latencies
ldap
leadInFrame
leftPad
leftPadUTF
leftUTF