Compare commits

...

33 Commits

Author | SHA1 | Date | Message
Robert Schulze | 0eb0830a13 | 2024-09-16 15:26:08 +02:00 | Merge 20b25566f5 into d793e06860
Rich Raposa | d793e06860 | 2024-09-16 13:12:30 +00:00 | Merge pull request #69622 from Olexandr88/patch-1 (Docs: Update index)
vdimir | 1986fb1418 | 2024-09-16 12:34:31 +00:00 | Merge pull request #68595 from ClickHouse/vdimir/fix_function_printf_style (Fix style in Functions/printf.cpp)
Robert Schulze | 20b25566f5 | 2024-09-16 11:30:56 +00:00 | Remove superfluous --multiquery/-n
Mikhail f. Shiryaev | f36408a666 | 2024-09-16 10:20:12 +00:00 | Merge pull request #69599 from ClickHouse/local-debug (Improve debug step in actions)
Yarik Briukhovetskyi | de85f5f251 | 2024-09-16 12:15:11 +02:00 | empty commit (I've changed the changelog entry)
Oleksandr | 85af661b9c | 2024-09-16 12:57:24 +03:00 | Docs: Update index
Antonio Andelic | b42c6491e4 | 2024-09-16 09:03:11 +00:00 | Merge pull request #69578 from ClickHouse/issues/68932/enable_in_ci (enable removeRecursive in CI)
Robert Schulze | 1a4c7b7c61 | 2024-09-16 08:54:02 +00:00 | Merge pull request #69493 from ucasfl/vector-index-insert (Speedup insert performance of vector similarity index by parallelization)
Oleksandr | 14feba8443 | 2024-09-16 11:42:09 +03:00 | Docs: Update index
vdimir | 4c4a051d5e | 2024-09-16 08:02:05 +00:00 | Merge pull request #69075 from kirillgarbar/remove-stale-moving-parts (Remove stale moving parts without zookeeper)
Vitaly Baranov | a55cc03973 | 2024-09-16 07:58:17 +00:00 | Merge pull request #69611 from vitlibar/masking-sensitive-info-in-gcs-table-function (Masking sensitive info in gcs() table function)
Robert Schulze | 37411bf240 | 2024-09-15 15:06:14 +00:00 | Fix sizing with unconstrained thread pool size
Vitaly Baranov | a461d20af9 | 2024-09-13 23:03:56 +02:00 | Masking sensitive info in gcs() table function.
Mikhail f. Shiryaev | b55d0b54ea | 2024-09-13 17:35:09 +02:00 | Merge steps together to minimize grouping
Mikhail f. Shiryaev | 418ef3f8bc | 2024-09-13 17:20:49 +02:00 | Use local debug action in every action
Mikhail f. Shiryaev | b420bbf855 | 2024-09-13 17:17:10 +02:00 | Improve debug action
Кирилл Гарбар | 6a7cfd13f7 | 2024-09-13 15:25:17 +03:00 | Set PRESERVE_BLOBS if part is fetched from another replica
Mikhail Artemenko | baf6aaef1d | 2024-09-13 11:32:33 +00:00 | fix tests
Robert Schulze | 9ca149a487 | 2024-09-13 11:07:09 +00:00 | Fix GWP-asan crash
Mikhail Artemenko | 042194e3f6 | 2024-09-13 08:50:28 +00:00 | enable removeRecursive in CI
Кирилл Гарбар | 120e38c72a | 2024-09-12 19:26:58 +03:00 | Merge remote-tracking branch 'kirillgarbar/master' into remove-stale-moving-parts
Robert Schulze | 38b5ea9066 | 2024-09-12 12:43:27 +00:00 | Fix docs
Robert Schulze | fe5e061fff | 2024-09-12 10:38:14 +00:00 | Some fixups
flynn | f6b965872f | 2024-09-12 06:40:33 +00:00 | Merge branch 'master' of github.com:ClickHouse/ClickHouse into vector-index-insert
flynn | 22c3b71196 | 2024-09-12 03:54:25 +00:00 | Make vector similarity index creation thread pool globally
flynn | 7425d4aa1a | 2024-09-11 10:12:42 +00:00 | remove blank line
flynn | cf12e3924f | 2024-09-11 09:31:46 +00:00 | Speedup insert data with vector similarity index by add data to index parallel
vdimir | cfc931160d | 2024-09-02 16:05:02 +02:00 | Merge branch 'master' into vdimir/fix_function_printf_style
Кирилл Гарбар | b2c4b771d8 | 2024-08-29 19:33:04 +03:00 | Minor fixes
Кирилл Гарбар | edf4e09fb2 | 2024-08-29 18:46:06 +03:00 | Remove stale moving parts without zookeeper
vdimir | 07f44fdb89 | 2024-08-22 11:23:50 +02:00 | Merge branch 'master' into vdimir/fix_function_printf_style
vdimir | 2fcbe2465a | 2024-08-20 09:07:15 +00:00 | Fix style in Functions/printf.cpp
115 changed files with 626 additions and 350 deletions

View File

@@ -4,15 +4,31 @@ description: Prints workflow debug info
 runs:
   using: "composite"
   steps:
-    - name: Print envs
+    - name: Envs, event.json and contexts
       shell: bash
       run: |
-        echo "::group::Envs"
-        env
-        echo "::endgroup::"
-    - name: Print Event.json
-      shell: bash
-      run: |
-        echo "::group::Event.json"
+        echo '::group::Environment variables'
+        env | sort
+        echo '::endgroup::'
+
+        echo '::group::event.json'
         python3 -m json.tool "$GITHUB_EVENT_PATH"
-        echo "::endgroup::"
+        echo '::endgroup::'
+
+        cat << 'EOF'
+        ::group::github context
+        ${{ toJSON(github) }}
+        ::endgroup::
+        ::group::env context
+        ${{ toJSON(env) }}
+        ::endgroup::
+        ::group::runner context
+        ${{ toJSON(runner) }}
+        ::endgroup::
+        ::group::job context
+        ${{ toJSON(job) }}
+        ::endgroup::
+        EOF

View File

@@ -27,6 +27,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"

View File

@@ -33,6 +33,8 @@ jobs:
           clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cherry pick
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"

View File

@@ -56,13 +56,13 @@ jobs:
       GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
     runs-on: [self-hosted, release-maker]
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Prepare Release Info
         shell: bash
         run: |

View File

@@ -11,6 +11,7 @@ name: Build docker images
         required: false
         type: boolean
         default: false
+
 jobs:
   DockerBuildAarch64:
     runs-on: [self-hosted, style-checker-aarch64]

View File

@@ -8,20 +8,21 @@ on: # yamllint disable-line rule:truthy
   schedule:
     - cron: '0 */6 * * *'
   workflow_dispatch:
+
 jobs:
   RunConfig:
     runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |

View File

@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Merge sync PR
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"

View File

@@ -14,14 +14,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cancel PR workflow
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run

View File

@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |

View File

@@ -25,14 +25,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cancel previous Sync PR workflow
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run

View File

@@ -24,6 +24,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"

View File

@@ -62,8 +62,6 @@ jobs:
     env:
       GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
@@ -72,6 +70,8 @@ jobs:
           submodules: ${{inputs.submodules}}
           fetch-depth: ${{inputs.checkout_depth}}
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Set build envs
         run: |
           cat >> "$GITHUB_ENV" << 'EOF'

View File

@@ -13,16 +13,17 @@ Here is a complete list of available database engines. Follow the links for more
 - [Atomic](../../engines/database-engines/atomic.md)
-- [MySQL](../../engines/database-engines/mysql.md)
+- [Lazy](../../engines/database-engines/lazy.md)
+- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)
 - [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md)
-- [Lazy](../../engines/database-engines/lazy.md)
+- [MySQL](../../engines/database-engines/mysql.md)
 - [PostgreSQL](../../engines/database-engines/postgresql.md)
-- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)
 - [Replicated](../../engines/database-engines/replicated.md)
 - [SQLite](../../engines/database-engines/sqlite.md)

View File

@@ -107,6 +107,10 @@ The vector similarity index currently does not work with per-table, non-default
 [here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml.
 :::
 
+Vector index creation is known to be slow. To speed the process up, index creation can be parallelized. The maximum number of threads can be
+configured using server configuration
+setting [max_build_vector_similarity_index_thread_pool_size](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size).
+
 ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary
 tables. ANNIndexes are ideally used only with immutable or rarely changed data, respectively when are far more read requests than write
 requests.
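
For reference, a minimal sketch of how this server setting could be applied in the server configuration (config.xml or a file under config.d/); the value 8 below is an arbitrary illustration, not something prescribed by this changeset:

<clickhouse>
    <!-- Hypothetical example: cap the global thread pool used to build vector similarity indexes (0 means all cores). -->
    <max_build_vector_similarity_index_thread_pool_size>8</max_build_vector_similarity_index_thread_pool_size>
</clickhouse>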

View File

@@ -491,6 +491,14 @@ Type: Double
 
 Default: 0.9
 
+## max_build_vector_similarity_index_thread_pool_size {#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size}
+
+The maximum number of threads to use for building vector indexes. 0 means all cores.
+
+Type: UInt64
+
+Default: 16
+
 ## cgroups_memory_usage_observer_wait_time
 
 Interval in seconds during which the server's maximum allowed memory consumption is adjusted by the corresponding threshold in cgroups. (see

View File

@@ -178,6 +178,9 @@
     M(ObjectStorageAzureThreads, "Number of threads in the AzureObjectStorage thread pool.") \
     M(ObjectStorageAzureThreadsActive, "Number of threads in the AzureObjectStorage thread pool running a task.") \
     M(ObjectStorageAzureThreadsScheduled, "Number of queued or active jobs in the AzureObjectStorage thread pool.") \
+    M(BuildVectorSimilarityIndexThreads, "Number of threads in the build vector similarity index thread pool.") \
+    M(BuildVectorSimilarityIndexThreadsActive, "Number of threads in the build vector similarity index thread pool running a task.") \
+    M(BuildVectorSimilarityIndexThreadsScheduled, "Number of queued or active jobs in the build vector similarity index thread pool.") \
     \
     M(DiskPlainRewritableAzureDirectoryMapSize, "Number of local-to-remote path entries in the 'plain_rewritable' in-memory map for AzureObjectStorage.") \
     M(DiskPlainRewritableLocalDirectoryMapSize, "Number of local-to-remote path entries in the 'plain_rewritable' in-memory map for LocalObjectStorage.") \

View File

@@ -63,6 +63,7 @@ static struct InitFiu
     REGULAR(keepermap_fail_drop_data) \
     REGULAR(lazy_pipe_fds_fail_close) \
     PAUSEABLE(infinite_sleep) \
+    PAUSEABLE(stop_moving_part_before_swap_with_active) \
 
 namespace FailPoints

View File

@@ -50,7 +50,7 @@ namespace DB
     M(UInt32, asynchronous_heavy_metrics_update_period_s, 120, "Period in seconds for updating heavy asynchronous metrics.", 0) \
     M(String, default_database, "default", "Default database name.", 0) \
     M(String, tmp_policy, "", "Policy for storage with temporary data.", 0) \
-    M(UInt64, max_temporary_data_on_disk_size, 0, "The maximum amount of storage that could be used for external aggregation, joins or sorting., ", 0) \
+    M(UInt64, max_temporary_data_on_disk_size, 0, "The maximum amount of storage that could be used for external aggregation, joins or sorting.", 0) \
     M(String, temporary_data_in_cache, "", "Cache disk name for temporary data.", 0) \
     M(UInt64, aggregate_function_group_array_max_element_size, 0xFFFFFF, "Max array element size in bytes for groupArray function. This limit is checked at serialization and help to avoid large state size.", 0) \
     M(GroupArrayActionWhenLimitReached, aggregate_function_group_array_action_when_limit_is_reached, GroupArrayActionWhenLimitReached::THROW, "Action to execute when max array element size is exceeded in groupArray: `throw` exception, or `discard` extra values", 0) \
@@ -65,6 +65,7 @@ namespace DB
     M(UInt64, async_insert_threads, 16, "Maximum number of threads to actually parse and insert data in background. Zero means asynchronous mode is disabled", 0) \
     M(Bool, async_insert_queue_flush_on_shutdown, true, "If true queue of asynchronous inserts is flushed on graceful shutdown", 0) \
     M(Bool, ignore_empty_sql_security_in_create_view_query, true, "If true, ClickHouse doesn't write defaults for empty SQL security statement in CREATE VIEW queries. This setting is only necessary for the migration period and will become obsolete in 24.4", 0) \
+    M(UInt64, max_build_vector_similarity_index_thread_pool_size, 16, "The maximum number of threads to use to build vector similarity indexes. 0 means all cores.", 0) \
     \
     /* Database Catalog */ \
     M(UInt64, database_atomic_delay_before_drop_table_sec, 8 * 60, "The delay during which a dropped table can be restored using the UNDROP statement. If DROP TABLE ran with a SYNC modifier, the setting is ignored.", 0) \

View File

@@ -50,13 +50,6 @@ private:
         return executeNonconstant(input);
     }
 
-    [[maybe_unused]] String toString() const
-    {
-        WriteBufferFromOwnString buf;
-        buf << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() << "\n";
-        return buf.str();
-    }
-
 private:
     ColumnWithTypeAndName executeLiteral(std::string_view literal) const
     {
@@ -231,9 +224,7 @@ public:
             const auto & instruction = instructions[i];
             try
             {
-                // std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl;
                 concat_args[i] = instruction.execute();
-                // std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl;
             }
             catch (const fmt::v9::format_error & e)
             {
@@ -358,7 +349,14 @@ private:
 
 REGISTER_FUNCTION(Printf)
 {
-    factory.registerFunction<FunctionPrintf>();
+    factory.registerFunction<FunctionPrintf>(
+        FunctionDocumentation{.description=R"(
+The `printf` function formats the given string with the values (strings, integers, floating-points etc.) listed in the arguments, similar to printf function in C++.
+The format string can contain format specifiers starting with `%` character.
+Anything not contained in `%` and the following format specifier is considered literal text and copied verbatim into the output.
+Literal `%` character can be escaped by `%%`.)", .examples{{"sum", "select printf('%%%s %s %d', 'Hello', 'World', 2024);", "%Hello World 2024"}}, .categories{"String"}
+        });
 }
 
 }

View File

@@ -10,6 +10,7 @@
 #include <Common/SensitiveDataMasker.h>
 #include <Common/Macros.h>
 #include <Common/EventNotifier.h>
+#include <Common/getNumberOfPhysicalCPUCores.h>
 #include <Common/Stopwatch.h>
 #include <Common/formatReadable.h>
 #include <Common/Throttler.h>
@@ -121,7 +122,6 @@
 #include <Interpreters/InterpreterSelectWithUnionQuery.h>
 #include <base/defines.h>
 
-
 namespace fs = std::filesystem;
 
 namespace ProfileEvents
@@ -164,6 +164,9 @@ namespace CurrentMetrics
     extern const Metric TablesLoaderForegroundThreadsActive;
     extern const Metric TablesLoaderForegroundThreadsScheduled;
     extern const Metric IOWriterThreadsScheduled;
+    extern const Metric BuildVectorSimilarityIndexThreads;
+    extern const Metric BuildVectorSimilarityIndexThreadsActive;
+    extern const Metric BuildVectorSimilarityIndexThreadsScheduled;
     extern const Metric AttachedTable;
     extern const Metric AttachedView;
     extern const Metric AttachedDictionary;
@@ -297,6 +300,8 @@ struct ContextSharedPart : boost::noncopyable
     mutable std::unique_ptr<ThreadPool> load_marks_threadpool; /// Threadpool for loading marks cache.
     mutable OnceFlag prefetch_threadpool_initialized;
     mutable std::unique_ptr<ThreadPool> prefetch_threadpool; /// Threadpool for loading marks cache.
+    mutable OnceFlag build_vector_similarity_index_threadpool_initialized;
+    mutable std::unique_ptr<ThreadPool> build_vector_similarity_index_threadpool; /// Threadpool for vector-similarity index creation.
     mutable UncompressedCachePtr index_uncompressed_cache TSA_GUARDED_BY(mutex); /// The cache of decompressed blocks for MergeTree indices.
     mutable QueryCachePtr query_cache TSA_GUARDED_BY(mutex); /// Cache of query results.
     mutable MarkCachePtr index_mark_cache TSA_GUARDED_BY(mutex); /// Cache of marks in compressed files of MergeTree indices.
@@ -3297,6 +3302,21 @@ size_t Context::getPrefetchThreadpoolSize() const
     return config.getUInt(".prefetch_threadpool_pool_size", 100);
 }
 
+ThreadPool & Context::getBuildVectorSimilarityIndexThreadPool() const
+{
+    callOnce(shared->build_vector_similarity_index_threadpool_initialized, [&] {
+        size_t pool_size = shared->server_settings.max_build_vector_similarity_index_thread_pool_size > 0
+            ? shared->server_settings.max_build_vector_similarity_index_thread_pool_size
+            : getNumberOfPhysicalCPUCores();
+        shared->build_vector_similarity_index_threadpool = std::make_unique<ThreadPool>(
+            CurrentMetrics::BuildVectorSimilarityIndexThreads,
+            CurrentMetrics::BuildVectorSimilarityIndexThreadsActive,
+            CurrentMetrics::BuildVectorSimilarityIndexThreadsScheduled,
+            pool_size);
+    });
+    return *shared->build_vector_similarity_index_threadpool;
+}
+
 BackgroundSchedulePool & Context::getBufferFlushSchedulePool() const
 {
     callOnce(shared->buffer_flush_schedule_pool_initialized, [&] {

View File

@@ -1097,6 +1097,8 @@ public:
     /// and make a prefetch by putting a read task to threadpoolReader.
     size_t getPrefetchThreadpoolSize() const;
 
+    ThreadPool & getBuildVectorSimilarityIndexThreadPool() const;
+
     /// Settings for MergeTree background tasks stored in config.xml
     BackgroundTaskSchedulingSettings getBackgroundProcessingTaskSchedulingSettings() const;
     BackgroundTaskSchedulingSettings getBackgroundMoveTaskSchedulingSettings() const;

View File

@@ -74,7 +74,8 @@ private:
             findMySQLFunctionSecretArguments();
         }
         else if ((function.name == "s3") || (function.name == "cosn") || (function.name == "oss") ||
-                 (function.name == "deltaLake") || (function.name == "hudi") || (function.name == "iceberg"))
+                 (function.name == "deltaLake") || (function.name == "hudi") || (function.name == "iceberg") ||
+                 (function.name == "gcs"))
         {
             /// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
             findS3FunctionSecretArguments(/* is_cluster_function= */ false);

View File

@@ -5,9 +5,11 @@
 #include <Columns/ColumnArray.h>
 #include <Common/BitHelpers.h>
 #include <Common/formatReadable.h>
+#include <Common/getNumberOfPhysicalCPUCores.h>
 #include <Common/logger_useful.h>
 #include <Common/typeid_cast.h>
 #include <Core/Field.h>
+#include <Core/ServerSettings.h>
 #include <DataTypes/DataTypeArray.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
@@ -29,7 +31,6 @@ namespace DB
 
 namespace ErrorCodes
 {
-    extern const int CANNOT_ALLOCATE_MEMORY;
     extern const int FORMAT_VERSION_TOO_OLD;
     extern const int ILLEGAL_COLUMN;
     extern const int INCORRECT_DATA;
@@ -131,8 +132,7 @@ void USearchIndexWithSerialization::deserialize(ReadBuffer & istr)
         /// See the comment in MergeTreeIndexGranuleVectorSimilarity::deserializeBinary why we throw here
         throw Exception(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index. Please drop the index and create it again. Error: {}", String(result.error.release()));
 
-    if (!try_reserve(limits()))
-        throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index");
+    try_reserve(limits());
 }
 
 USearchIndexWithSerialization::Statistics USearchIndexWithSerialization::getStatistics() const
@@ -270,20 +270,49 @@ void updateImpl(const ColumnArray * column_array, const ColumnArray::Offsets & c
         throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length");
 
     /// Reserving space is mandatory
-    if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + rows)))
-        throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index");
+    size_t max_thread_pool_size = Context::getGlobalContextInstance()->getServerSettings().max_build_vector_similarity_index_thread_pool_size;
+    if (max_thread_pool_size == 0)
+        max_thread_pool_size = getNumberOfPhysicalCPUCores();
+    unum::usearch::index_limits_t limits(roundUpToPowerOfTwoOrZero(index->size() + rows), max_thread_pool_size);
+    index->reserve(limits);
+
+    /// Vector index creation is slooooow. Add the new rows in parallel. The threadpool is global to avoid oversubscription when multiple
+    /// indexes are build simultaneously (e.g. multiple merges run at the same time).
+    auto & thread_pool = Context::getGlobalContextInstance()->getBuildVectorSimilarityIndexThreadPool();
 
-    for (size_t row = 0; row < rows; ++row)
+    auto add_vector_to_index = [&](USearchIndex::vector_key_t key, size_t row, ThreadGroupPtr thread_group)
     {
-        if (auto result = index->add(static_cast<USearchIndex::vector_key_t>(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result)
+        SCOPE_EXIT_SAFE(
+            if (thread_group)
+                CurrentThread::detachFromGroupIfNotDetached();
+        );
+
+        if (thread_group)
+            CurrentThread::attachToGroupIfDetached(thread_group);
+
+        /// add is thread-safe
+        if (auto result = index->add(key, &column_array_data_float_data[column_array_offsets[row - 1]]); !result)
+        {
             throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release()));
+        }
         else
         {
             ProfileEvents::increment(ProfileEvents::USearchAddCount);
             ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members);
             ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances);
         }
+    };
+
+    size_t index_size = index->size();
+
+    for (size_t row = 0; row < rows; ++row)
+    {
+        auto key = static_cast<USearchIndex::vector_key_t>(index_size + row);
+        auto task = [group = CurrentThread::getGroup(), &add_vector_to_index, key, row] { add_vector_to_index(key, row, group); };
+        thread_pool.scheduleOrThrowOnError(task);
     }
+
+    thread_pool.wait();
 }
 }

View File

@@ -1,6 +1,7 @@
 #include <Storages/MergeTree/MergeTreeData.h>
 #include <Storages/MergeTree/MergeTreePartsMover.h>
 #include <Storages/MergeTree/MergeTreeSettings.h>
+#include <Common/FailPoint.h>
 #include <Common/logger_useful.h>
 
 #include <set>
@@ -15,6 +16,11 @@ namespace ErrorCodes
     extern const int DIRECTORY_ALREADY_EXISTS;
 }
 
+namespace FailPoints
+{
+    extern const char stop_moving_part_before_swap_with_active[];
+}
+
 namespace
 {
@@ -226,6 +232,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
     cloned_part.temporary_directory_lock = data->getTemporaryPartDirectoryHolder(part->name);
 
     MutableDataPartStoragePtr cloned_part_storage;
+    bool preserve_blobs = false;
     if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication)
     {
         /// Try zero-copy replication and fallback to default copy if it's not possible
@@ -253,6 +260,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
         if (zero_copy_part)
         {
             /// FIXME for some reason we cannot just use this part, we have to re-create it through MergeTreeDataPartBuilder
+            preserve_blobs = true;
             zero_copy_part->is_temp = false; /// Do not remove it in dtor
             cloned_part_storage = zero_copy_part->getDataPartStoragePtr();
         }
@@ -272,7 +280,17 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
     cloned_part.part = std::move(builder).withPartFormatFromDisk().build();
 
     LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part.part->getDataPartStorage().getFullPath());
-    cloned_part.part->is_temp = data->allowRemoveStaleMovingParts();
+    cloned_part.part->is_temp = false;
+    if (data->allowRemoveStaleMovingParts())
+    {
+        cloned_part.part->is_temp = true;
+        /// Setting it in case connection to zookeeper is lost while moving
+        /// Otherwise part might be stuck in the moving directory due to the KEEPER_EXCEPTION in part's destructor
+        if (preserve_blobs)
+            cloned_part.part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::PRESERVE_BLOBS;
+        else
+            cloned_part.part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::REMOVE_BLOBS;
+    }
     cloned_part.part->loadColumnsChecksumsIndexes(true, true);
     cloned_part.part->loadVersionMetadata();
     cloned_part.part->modification_time = cloned_part.part->getDataPartStorage().getLastModified().epochTime();
@@ -282,6 +300,8 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
 
 void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) const
 {
+    /// Used to get some stuck parts in the moving directory by stopping moves while pause is active
+    FailPointInjection::pauseFailPoint(FailPoints::stop_moving_part_before_swap_with_active);
     if (moves_blocker.isCancelled())
         throw Exception(ErrorCodes::ABORTED, "Cancelled moving parts.");

View File

@@ -42,6 +42,7 @@
             <multi_read>1</multi_read>
             <check_not_exists>1</check_not_exists>
             <create_if_not_exists>1</create_if_not_exists>
+            <remove_recursive>1</remove_recursive>
         </feature_flags>
     </keeper_server>
 </clickhouse>

View File

@@ -105,7 +105,7 @@ setup_logs_replication
 clickhouse-client --query "SHOW DATABASES"
 clickhouse-client --query "CREATE DATABASE datasets"
-clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
+clickhouse-client < /repo/tests/docker_scripts/create.sql
 clickhouse-client --query "SHOW TABLES FROM datasets"
 
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

View File

@@ -62,7 +62,7 @@ start_server
 setup_logs_replication
 
 clickhouse-client --query "CREATE DATABASE datasets"
-clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
+clickhouse-client < /repo/tests/docker_scripts/create.sql
 clickhouse-client --query "SHOW TABLES FROM datasets"
 
 clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"

View File

@@ -64,6 +64,7 @@ function configure()
         randomize_config_boolean_value multi_read keeper_port
         randomize_config_boolean_value check_not_exists keeper_port
         randomize_config_boolean_value create_if_not_exists keeper_port
+        randomize_config_boolean_value remove_recursive keeper_port
     fi
 
     sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml

View File

@@ -89,7 +89,6 @@ class Client:
         command = self.command[:]
 
         if stdin is None:
-            command += ["--multiquery"]
             stdin = sql
         else:
             command += ["--query", sql]

View File

@@ -393,6 +393,7 @@ def test_table_functions():
         f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, connection_string = '{azure_conn_string}', container = 'cont', blob_path = 'test_simple_16.csv', format = 'CSV')",
         f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, storage_account_url = '{azure_storage_account_url}', container = 'cont', blob_path = 'test_simple_17.csv', account_name = '{azure_account_name}', account_key = '{azure_account_key}')",
         f"iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')",
+        f"gcs('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')",
     ]
 
     def make_test_case(i):

View File

@@ -0,0 +1,46 @@
+<clickhouse>
+    <remote_servers>
+        <cluster>
+            <shard>
+                <replica>
+                    <host>ch1</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster>
+    </remote_servers>
+    <macros>
+        <shard>01</shard>
+    </macros>
+    <storage_configuration>
+        <disks>
+            <s3>
+                <type>s3</type>
+                <endpoint>http://minio1:9001/root/data/</endpoint>
+                <access_key_id>minio</access_key_id>
+                <secret_access_key>minio123</secret_access_key>
+            </s3>
+        </disks>
+        <policies>
+            <s3>
+                <volumes>
+                    <default>
+                        <disk>default</disk>
+                        <perform_ttl_move_on_insert>False</perform_ttl_move_on_insert>
+                    </default>
+                    <s3>
+                        <disk>s3</disk>
+                        <perform_ttl_move_on_insert>False</perform_ttl_move_on_insert>
+                    </s3>
+                </volumes>
+                <move_factor>0.0</move_factor>
+            </s3>
+        </policies>
+    </storage_configuration>
+    <merge_tree>
+        <allow_remote_fs_zero_copy_replication>true</allow_remote_fs_zero_copy_replication>
+        <storage_policy>s3</storage_policy>
+    </merge_tree>
+    <allow_remove_stale_moving_parts>true</allow_remove_stale_moving_parts>
+</clickhouse>

View File

@@ -0,0 +1,117 @@
+from pathlib import Path
+import time
+import pytest
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+
+ch1 = cluster.add_instance(
+    "ch1",
+    main_configs=[
+        "config.xml",
+    ],
+    macros={"replica": "node1"},
+    with_zookeeper=True,
+    with_minio=True,
+)
+
+DATABASE_NAME = "stale_moving_parts"
+
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def q(node, query):
+    return node.query(database=DATABASE_NAME, sql=query)
+
+
+# .../disks/s3/store/
+def get_table_path(node, table):
+    return (
+        node.query(
+            sql=f"SELECT data_paths FROM system.tables WHERE table = '{table}' and database = '{DATABASE_NAME}' LIMIT 1"
+        )
+        .strip('"\n[]')
+        .split(",")[1]
+        .strip("'")
+    )
+
+
+def exec(node, cmd, path):
+    return node.exec_in_container(
+        [
+            "bash",
+            "-c",
+            f"{cmd} {path}",
+        ]
+    )
+
+
+def wait_part_is_stuck(node, table_moving_path, moving_part):
+    num_tries = 5
+    while q(node, "SELECT part_name FROM system.moves").strip() != moving_part:
+        if num_tries == 0:
+            raise Exception("Part has not started to move")
+        num_tries -= 1
+        time.sleep(1)
+    num_tries = 5
+    while exec(node, "ls", table_moving_path).strip() != moving_part:
+        if num_tries == 0:
+            raise Exception("Part is not stuck in the moving directory")
+        num_tries -= 1
+        time.sleep(1)
+
+
+def wait_zookeeper_node_to_start(zk_nodes, timeout=60):
+    start = time.time()
+    while time.time() - start < timeout:
+        try:
+            for instance in zk_nodes:
+                conn = cluster.get_kazoo_client(instance)
+                conn.get_children("/")
+            print("All instances of ZooKeeper started")
+            return
+        except Exception as ex:
+            print(("Can't connect to ZooKeeper " + str(ex)))
+            time.sleep(0.5)
+
+
+def test_remove_stale_moving_parts_without_zookeeper(started_cluster):
+    ch1.query(f"CREATE DATABASE IF NOT EXISTS {DATABASE_NAME}")
+
+    q(
+        ch1,
+        "CREATE TABLE test_remove ON CLUSTER cluster ( id UInt32 ) ENGINE ReplicatedMergeTree() ORDER BY id;",
+    )
+
+    table_moving_path = Path(get_table_path(ch1, "test_remove")) / "moving"
+
+    q(ch1, "SYSTEM ENABLE FAILPOINT stop_moving_part_before_swap_with_active")
+    q(ch1, "INSERT INTO test_remove SELECT number FROM numbers(100);")
+    moving_part = "all_0_0_0"
+    move_response = ch1.get_query_request(
+        sql=f"ALTER TABLE test_remove MOVE PART '{moving_part}' TO DISK 's3'",
+        database=DATABASE_NAME,
+    )
+
+    wait_part_is_stuck(ch1, table_moving_path, moving_part)
+
+    cluster.stop_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
+    # Stop moves in case table is not read-only yet
+    q(ch1, "SYSTEM STOP MOVES")
+    q(ch1, "SYSTEM DISABLE FAILPOINT stop_moving_part_before_swap_with_active")
+
+    assert "Cancelled moving parts" in move_response.get_error()
+    assert exec(ch1, "ls", table_moving_path).strip() == ""
+
+    cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
+    wait_zookeeper_node_to_start(["zoo1", "zoo2", "zoo3"])
+    q(ch1, "SYSTEM START MOVES")
+
+    q(ch1, f"DROP TABLE test_remove")

View File

@@ -427,7 +427,7 @@
 done
 
 # for each query run, prepare array of metrics from query log
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
     'test text, query_index int, query_id text, version UInt8, time float');
 
@@ -582,7 +582,7 @@ numactl --cpunodebind=all --membind=all numactl --show
 # If the available memory falls below 2 * size, GNU parallel will suspend some of the running jobs.
 numactl --cpunodebind=all --membind=all parallel -v --joblog analyze/parallel-log.txt --memsuspend 15G --null < analyze/commands.txt 2>> analyze/errors.log
 
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 -- Join the metric names back to the metric statistics we've calculated, and make
 -- a denormalized table of them -- statistics for all metrics for all queries.
 -- The WITH, ARRAY JOIN and CROSS JOIN do not like each other:
@@ -680,7 +680,7 @@ rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.ts
 cat analyze/errors.log >> report/errors.log ||:
 cat profile-errors.log >> report/errors.log ||:
 
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view query_display_names as select * from
     file('analyze/query-display-names.tsv', TSV,
         'test text, query_index int, query_display_name text')
@@ -981,7 +981,7 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts
 for version in {right,left}
 do
     rm -rf data
-    clickhouse-local --multiquery --query "
+    clickhouse-local --query "
 create view query_profiles as
     with 0 as left, 1 as right
     select * from file('analyze/query-profiles.tsv', TSV,
@@ -1151,7 +1151,7 @@ function report_metrics
     rm -rf metrics ||:
     mkdir metrics
 
-    clickhouse-local --multiquery --query "
+    clickhouse-local --query "
 create view right_async_metric_log as
     select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes)
     ;
@@ -1211,7 +1211,7 @@ function upload_results
 
     # Prepare info for the CI checks table.
     rm -f ci-checks.tsv
-    clickhouse-local --multiquery --query "
+    clickhouse-local --query "
 create view queries as select * from file('report/queries.tsv', TSVWithNamesAndTypes);
 
 create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')

View File

@@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'
+$CLICKHOUSE_CLIENT -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'

View File

@@ -12,14 +12,14 @@ echo "
     DROP TABLE IF EXISTS rocksdb_race;
     CREATE TABLE rocksdb_race (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key);
     INSERT INTO rocksdb_race SELECT '1_' || toString(number), number FROM numbers(100000);
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT
 
 function read_stat_thread()
 {
     while true; do
         echo "
             SELECT * FROM system.rocksdb FORMAT Null;
-        " | $CLICKHOUSE_CLIENT -n
+        " | $CLICKHOUSE_CLIENT
     done
 }
 
@@ -29,7 +29,7 @@ function truncate_thread()
         sleep 3s;
         echo "
             TRUNCATE TABLE rocksdb_race;
-        " | $CLICKHOUSE_CLIENT -n
+        " | $CLICKHOUSE_CLIENT
     done
 }

View File

@@ -12,7 +12,7 @@ opts=(
     --join_algorithm='parallel_hash'
 )
 
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
     CREATE TABLE t1(a UInt32, b UInt32) ENGINE=MergeTree ORDER BY ();
     INSERT INTO t1 SELECT number, number FROM numbers_mt(1e6);

View File

@@ -5,12 +5,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
 
-timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -nm -q "
+timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -m -q "
     SELECT sleep(1) FROM numbers(100) FORMAT Null;
     SELECT 'FAIL';
 "
 
-timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -nm -q "
+timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -m -q "
     SELECT sleep(1) FROM numbers(100) FORMAT Null;
     SELECT 'FAIL';
 "

View File

@@ -16,7 +16,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     $CLICKHOUSE_CLIENT --echo --query "SYSTEM DROP FILESYSTEM CACHE"
 
-    $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+    $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
     FROM
     (
         SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -37,7 +37,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)"
 
-    $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+    $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
     FROM
     (
         SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -70,7 +70,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)"
 
-    $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+    $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
    FROM
    (
        SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -109,7 +109,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     $CLICKHOUSE_CLIENT --echo --query "SYSTEM FLUSH LOGS"
 
-    $CLICKHOUSE_CLIENT -n --query "SELECT
+    $CLICKHOUSE_CLIENT --query "SELECT
         query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read
     FROM
         system.query_log

View File

@@ -15,7 +15,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
     DROP ROLE IF EXISTS test_role_02242;
     CREATE ROLE test_role_02242;
 "

View File

@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
    DROP ROLE IF EXISTS test_role_02244;
    CREATE ROLE test_role_02244;
    DROP USER IF EXISTS kek_02243;

View File

@@ -44,7 +44,7 @@ protobuf_info() {
 fi
 }
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS $MAIN_TABLE;
 DROP TABLE IF EXISTS $ROUNDTRIP_TABLE;
 DROP TABLE IF EXISTS $COMPATIBILITY_TABLE;
@@ -78,14 +78,14 @@ echo $SET_OUTPUT
 echo
 echo "Insert $INITIAL_INSERT_VALUES into table (Nullable(String), Int32):"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 INSERT INTO $MAIN_TABLE VALUES $INITIAL_INSERT_VALUES;
 SELECT * FROM $MAIN_TABLE;
 "
 echo
 echo "Protobuf representation of the second row:"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH"
 hexdump -C $BINARY_FILE_PATH
 echo
@@ -101,12 +101,12 @@ hexdump -C $MESSAGE_FILE_PATH
 echo
 echo "Insert proto message into table (Nullable(String), Int32):"
-$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH"
 $CLICKHOUSE_CLIENT --query "SELECT * FROM $ROUNDTRIP_TABLE"
 echo
 echo "Proto output of the table using Google wrapper:"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH"
 hexdump -C $BINARY_FILE_PATH
 echo
@@ -124,14 +124,14 @@ echo
 echo "Insert $MULTI_WRAPPER_VALUES and reinsert using Google wrappers into:"
 echo "Table (Nullable(Int32), Nullable(Int32), Int32):"
 $CLICKHOUSE_CLIENT --query "INSERT INTO $MULTI_TABLE VALUES $MULTI_WRAPPER_VALUES"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH"
-$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH"
 $CLICKHOUSE_CLIENT --query "SELECT * FROM $MULTI_TABLE"
 rm "$BINARY_FILE_PATH"
 rm "$MESSAGE_FILE_PATH"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE $MAIN_TABLE;
 DROP TABLE $ROUNDTRIP_TABLE;
 DROP TABLE $COMPATIBILITY_TABLE;
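The Protobuf round trip above relies on a protobuf_info helper defined earlier in the test (not shown here), which presumably expands to the FORMAT and format_schema clauses. A rough, hypothetical sketch of the underlying pattern with placeholder table and schema names:

# Hypothetical sketch: export one row as a single Protobuf message, then load it back.
clickhouse-client -q "SELECT * FROM main_table WHERE ref = 2 LIMIT 1 FORMAT ProtobufSingle SETTINGS format_schema = 'message.proto:Message'" > row.bin
hexdump -C row.bin
clickhouse-client -q "INSERT INTO roundtrip_table SETTINGS format_schema = 'message.proto:Message' FORMAT Protobuf" < row.bin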


@@ -11,7 +11,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 echo "Using storage policy: $STORAGE_POLICY"
 $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02286"
-$CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_02286 (key UInt32, value String)
+$CLICKHOUSE_CLIENT --query "CREATE TABLE test_02286 (key UInt32, value String)
 Engine=MergeTree()
 ORDER BY key
 SETTINGS storage_policy='$STORAGE_POLICY', min_bytes_for_wide_part = 10485760"
@@ -38,7 +38,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02286 FORMAT Null"
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
-$CLICKHOUSE_CLIENT -n --query "SELECT count()
+$CLICKHOUSE_CLIENT --query "SELECT count()
 FROM (
 SELECT
 arrayJoin(cache_paths) AS cache_path,
@@ -54,7 +54,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
 $CLICKHOUSE_CLIENT --query "SELECT cache_path FROM system.filesystem_cache"
-$CLICKHOUSE_CLIENT -n --query "SELECT cache_path, local_path
+$CLICKHOUSE_CLIENT --query "SELECT cache_path, local_path
 FROM (
 SELECT
 arrayJoin(cache_paths) AS cache_path,


@@ -23,7 +23,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d
 ORDER BY tuple()
 SETTINGS storage_policy = '$STORAGE_POLICY'" > /dev/null
-$CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 -n --query "INSERT INTO test_02313
+$CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02313
 SELECT * FROM
 generateRandom('id Int32, val String')
 LIMIT 100000"


@@ -9,7 +9,7 @@ function check_refcnt_for_table()
 {
 local table=$1 && shift
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 system stop merges $table;
 -- cleanup thread may hold the parts lock
 system stop cleanup $table;
@@ -66,14 +66,14 @@ function check_refcnt_for_table()
 # NOTE: index_granularity=1 to cancel ASAP
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 drop table if exists data_02340;
 create table data_02340 (key Int, part Int) engine=MergeTree() partition by part order by key settings index_granularity=1;
 " || exit 1
 check_refcnt_for_table data_02340
 $CLICKHOUSE_CLIENT -q "drop table data_02340 sync"
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 drop table if exists data_02340_rep sync;
 create table data_02340_rep (key Int, part Int) engine=ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') partition by part order by key settings index_granularity=1;
 " || exit 1


@@ -7,14 +7,14 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 disk_name="02344_describe_cache_test"
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree() ORDER BY tuple()
 SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk', load_metadata_asynchronously = 0);
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.disks WHERE name = '$disk_name'
 """


@@ -24,7 +24,7 @@ function wait_query_by_id_started()
 # wait for query to be started
 while [ "$($CLICKHOUSE_CLIENT "$@" -q "select count() from system.processes where query_id = '$query_id'")" -ne 1 ]; do
 if [ "$(
-$CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -nm -q "
+$CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -m -q "
 system flush logs;
 select count() from system.query_log
@@ -52,7 +52,7 @@ $CLICKHOUSE_CLIENT -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_ordinary Engine=Or
 # debug build on CI, so if this will happen, then DROP query will be
 # finished instantly, and to avoid flakiness we will retry in this case
 while :; do
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}_ordinary.data_02352;
 CREATE TABLE ${CLICKHOUSE_DATABASE}_ordinary.data_02352 (key Int) Engine=Null();
 "


@@ -9,13 +9,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "ATTACH TABLE mv" |& {
+$CLICKHOUSE_CLIENT -m -q "ATTACH TABLE mv" |& {
 # CANNOT_GET_CREATE_TABLE_QUERY -- ATTACH TABLE IF EXISTS
 # TABLE_ALREADY_EXISTS -- ATTACH TABLE IF NOT EXISTS
 grep -F -m1 Exception | grep -v -e CANNOT_GET_CREATE_TABLE_QUERY -e TABLE_ALREADY_EXISTS
 }
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS null;
 CREATE TABLE null (key Int) ENGINE = Null;
 DROP TABLE IF EXISTS mv;


@@ -560,7 +560,6 @@ positionCaseInsensitive
 positionCaseInsensitiveUTF8
 positionUTF8
 pow
-printf
 proportionsZTest
 protocol
 queryID


@@ -27,7 +27,7 @@ function insert()
 function check_span()
 {
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 SYSTEM FLUSH LOGS;
 SELECT operation_name,
@@ -50,7 +50,7 @@ ${CLICKHOUSE_CLIENT} -nq "
 # $2 - value of distributed_foreground_insert
 function check_span_kind()
 {
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 SYSTEM FLUSH LOGS;
 SELECT count()
@@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} -nq "
 #
 # Prepare tables for tests
 #
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.dist_opentelemetry;
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.local_opentelemetry;
@@ -122,7 +122,7 @@ check_span_kind $trace_id 'CLIENT'
 #
 # Cleanup
 #
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 DROP TABLE ${CLICKHOUSE_DATABASE}.dist_opentelemetry;
 DROP TABLE ${CLICKHOUSE_DATABASE}.local_opentelemetry;
 "


@@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS 02419_test SYNC;"
 test_primary_key()
 {
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 CREATE TABLE 02419_test (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test2418', 3) PRIMARY KEY($1);
 INSERT INTO 02419_test VALUES (1, 1.1), (2, 2.2);
 SELECT value FROM 02419_test WHERE key = 1;


@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/replication.lib
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE IF EXISTS alter_table0;
 DROP TABLE IF EXISTS alter_table1;


@@ -21,7 +21,7 @@ wait_for_number_of_parts() {
 echo "$res"
 }
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE IF EXISTS test_without_merge;
 DROP TABLE IF EXISTS test_with_merge;
@@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;"
 wait_for_number_of_parts 'test_without_merge' 1 10
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_without_merge;
 SELECT 'With merge any part range';
@@ -47,7 +47,7 @@ INSERT INTO test_with_merge SELECT 3;"
 wait_for_number_of_parts 'test_with_merge' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_with_merge;
 SELECT 'With merge partition only';
@@ -60,7 +60,7 @@ INSERT INTO test_with_merge SELECT 3;"
 wait_for_number_of_parts 'test_with_merge' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one
 SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active;


@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 -- Limit S3 PUT request per second rate
 SET s3_max_put_rps = 2;
 SET s3_max_put_burst = 1;


@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS wikistat1 SYNC;
 DROP TABLE IF EXISTS wikistat2 SYNC;
 "
@@ -60,7 +60,7 @@ wait
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat1 WHERE NOT ignore(*)"
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat2 WHERE NOT ignore(*)"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS wikistat1 SYNC;
 DROP TABLE IF EXISTS wikistat2 SYNC;
 "


@@ -11,7 +11,7 @@ cp $CURDIR/data_ua_parser/os.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
 cp $CURDIR/data_ua_parser/browser.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
 cp $CURDIR/data_ua_parser/device.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_os;
 drop dictionary if exists regexp_browser;
 drop dictionary if exists regexp_device;
@@ -61,10 +61,10 @@ create table user_agents
 Engine = Log();
 "
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 insert into user_agents select ua from input('ua String') FORMAT LineAsString" < $CURDIR/data_ua_parser/useragents.txt
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 select ua, device,
 concat(tupleElement(browser, 1), ' ', tupleElement(browser, 2), '.', tupleElement(browser, 3)) as browser ,
 concat(tupleElement(os, 1), ' ', tupleElement(os, 2), '.', tupleElement(os, 3), '.', tupleElement(os, 4)) as os
@@ -74,7 +74,7 @@ from (
 dictGet('regexp_device', 'device_replacement', ua) device from user_agents) order by ua;
 "
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_os;
 drop dictionary if exists regexp_browser;
 drop dictionary if exists regexp_device;


@ -27,7 +27,7 @@ cat > "$yaml" <<EOL
version: '10' version: '10'
EOL EOL
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict1; drop dictionary if exists regexp_dict1;
create dictionary regexp_dict1 create dictionary regexp_dict1
( (
@ -69,7 +69,7 @@ cat > "$yaml" <<EOL
lucky: 'abcde' lucky: 'abcde'
EOL EOL
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
system reload dictionary regexp_dict1; -- { serverError 489 } system reload dictionary regexp_dict1; -- { serverError 489 }
" "
@ -79,7 +79,7 @@ cat > "$yaml" <<EOL
version: '\1' version: '\1'
EOL EOL
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
system reload dictionary regexp_dict1; -- { serverError 318 } system reload dictionary regexp_dict1; -- { serverError 318 }
" "
@ -92,7 +92,7 @@ cat > "$yaml" <<EOL
version: '\2.\3' version: '\2.\3'
EOL EOL
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
system reload dictionary regexp_dict1; system reload dictionary regexp_dict1;
select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+'); select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+');
select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/0.0.1 Safari/534.8+'); select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/0.0.1 Safari/534.8+');
@ -107,7 +107,7 @@ cat > "$yaml" <<EOL
col_array: '[1,2,3,-1,-2,-3]' col_array: '[1,2,3,-1,-2,-3]'
EOL EOL
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
create dictionary regexp_dict2 create dictionary regexp_dict2
( (
regexp String, regexp String,
@ -147,7 +147,7 @@ cat > "$yaml" <<EOL
EOL EOL
# dictGetAll # dictGetAll
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict3; drop dictionary if exists regexp_dict3;
create dictionary regexp_dict3 create dictionary regexp_dict3
( (
@ -192,7 +192,7 @@ cat > "$yaml" <<EOL
tag: 'Documentation' tag: 'Documentation'
EOL EOL
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict3; drop dictionary if exists regexp_dict3;
create dictionary regexp_dict3 create dictionary regexp_dict3
( (
@ -252,7 +252,7 @@ cat > "$yaml" <<EOL
pattern: '(?-i)hello.*world' pattern: '(?-i)hello.*world'
EOL EOL
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict4; drop dictionary if exists regexp_dict4;
create dictionary regexp_dict4 create dictionary regexp_dict4
( (
@ -291,7 +291,7 @@ select dictGetAll('regexp_dict4', 'pattern', 'HELLO WORLD');
select dictGetAll('regexp_dict4', 'pattern', 'HELLO\nWORLD'); select dictGetAll('regexp_dict4', 'pattern', 'HELLO\nWORLD');
" "
$CLICKHOUSE_CLIENT -n --query=" $CLICKHOUSE_CLIENT --query="
drop dictionary regexp_dict1; drop dictionary regexp_dict1;
drop dictionary regexp_dict2; drop dictionary regexp_dict2;
drop dictionary regexp_dict3; drop dictionary regexp_dict3;
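These hunks exercise regexp tree dictionaries loaded from the YAML written just above each call. As a side note on the two lookup functions used here: dictGet returns the attribute of the matching node, while dictGetAll collects the attribute from every matching pattern into an array. A hedged sketch reusing the dictionary name created by the test above:

# Hedged sketch; regexp_dict4 is assumed to exist as created by the test.
clickhouse-client -q "
SELECT
    dictGet('regexp_dict4', 'pattern', 'HELLO WORLD')    AS single_match,
    dictGetAll('regexp_dict4', 'pattern', 'HELLO WORLD') AS all_matches;
"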


@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # Check that if the background cleanup thread works correctly.
 CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS t_async_insert_cleanup SYNC;
 CREATE TABLE t_async_insert_cleanup (
 KeyID UInt32
@@ -27,7 +27,7 @@ old_answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper W
 for i in {1..300}; do
 answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'")
 if [ $answer == '10' ]; then
-$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
+$CLICKHOUSE_CLIENT --query "DROP TABLE t_async_insert_cleanup SYNC;"
 exit 0
 fi
 sleep 1
@@ -36,4 +36,4 @@ done
 $CLICKHOUSE_CLIENT --query "SELECT count(*) FROM t_async_insert_cleanup"
 echo $old_answer
 $CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'"
-$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
+$CLICKHOUSE_CLIENT --query "DROP TABLE t_async_insert_cleanup SYNC;"


@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 QUERY_ID="${CLICKHOUSE_DATABASE}_test_02585_query_to_kill_id_1"
-$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -n -q "
+$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -q "
 create temporary table tmp as select * from numbers(100000000);
 select * from remote('127.0.0.2', 'system.numbers_mt') where number in (select * from tmp);" &> /dev/null &


@@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 function get_query_id() { random_str 10; }
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists buf;
 drop table if exists dist;
 drop table if exists data;
@@ -31,7 +31,7 @@ query_id="$(get_query_id)"
 # test, since we care about the difference between NOW() and there should
 # not be any significant difference.
 $CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
 system flush logs;
 select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
 "
@@ -42,25 +42,25 @@ query_id="$(get_query_id)"
 # this query (and all subsequent) should reuse the previous connection (at least most of the time)
 $CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
 system flush logs;
 select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
 "
 echo "INSERT"
 query_id="$(get_query_id)"
-$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -nm -q "
+$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -m -q "
 insert into dist_dist values (1),(2);
 select * from data;
 "
 sleep 1
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist_dist"
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist_dist"
 sleep 1
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist"
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist"
 echo "CHECK"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
 select * from data order by key;
 system flush logs;
 select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
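The checks above avoid interpolating the query id into the SQL text by binding it as a typed parameter. A small stand-alone sketch of the same mechanism, assuming any running server; the identifier value is illustrative:

# --param_<name> supplies a value for the {name:Type} placeholder; substitution happens server-side.
query_id="some-initial-query-id"
clickhouse-client --param_query_id "$query_id" -q "
    SELECT count()
    FROM system.query_log
    WHERE type = 'QueryFinish' AND initial_query_id = {query_id:String};
"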


@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 echo "INSERT TO S3"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1;
 " 2>&1 | $CLICKHOUSE_LOCAL -q "
 WITH '(\\w+): (\\d+)' AS pattern,
@@ -30,7 +30,7 @@ SELECT * FROM (
 "
 echo "CHECK WITH query_log"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 SYSTEM FLUSH LOGS;
 SELECT type,
 'S3CreateMultipartUpload', ProfileEvents['S3CreateMultipartUpload'],
@@ -45,7 +45,7 @@ ORDER BY query_start_time DESC;
 "
 echo "CREATE"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE IF EXISTS times;
 CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t
 SETTINGS
@@ -56,29 +56,29 @@ CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t
 "
 echo "INSERT"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0;
 " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '
 echo "READ"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
 " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '
 echo "INSERT and READ INSERT"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0;
 SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
 INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0;
 " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '
 echo "DROP"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE times;
 "
 echo "CHECK with query_log"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 SYSTEM FLUSH LOGS;
 SELECT type,
 query,


@@ -21,7 +21,7 @@ wait_for_number_of_parts() {
 echo "$res"
 }
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE IF EXISTS test_without_merge;
 DROP TABLE IF EXISTS test_replicated;
@@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;"
 wait_for_number_of_parts 'test_without_merge' 1 10
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_without_merge;
 SELECT 'With merge replicated any part range';
@@ -47,7 +47,7 @@ INSERT INTO test_replicated SELECT 3;"
 wait_for_number_of_parts 'test_replicated' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_replicated;
 SELECT 'With merge replicated partition only';
@@ -60,7 +60,7 @@ INSERT INTO test_replicated SELECT 3;"
 wait_for_number_of_parts 'test_replicated' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one
 SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_replicated' AND active;


@@ -9,6 +9,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 echo "
 DROP USER IF EXISTS postgresql_user;
 CREATE USER postgresql_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password;
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT
 psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} ${CLICKHOUSE_DATABASE} --user postgresql_user -c "SELECT 1.23::Decimal256(70) AS test;"


@@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # too slow with this.
 #
 # Unfortunately, the test has to buffer it in memory.
-$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q "
+$CLICKHOUSE_CLIENT --max_memory_usage 16G -m -q "
 INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV')
 SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024)
 SETTINGS s3_max_single_part_upload_size = '5Gi';


@@ -10,7 +10,7 @@ set -e
 NUM_REPLICAS=5
 for i in $(seq 1 $NUM_REPLICAS); do
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE IF EXISTS r$i SYNC;
 CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x SETTINGS replicated_deduplication_window = 1, allow_remote_fs_zero_copy_replication = 1;
 "


@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 for DISK in s3_disk s3_cache
 do
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (id Int32, empty Array(Int32))
 ENGINE=MergeTree ORDER BY id
@@ -17,13 +17,13 @@ do
 SELECT * FROM test;
 "
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 BACKUP TABLE test TO Disk('backups', 'test_s3_backup');
 DROP TABLE test;
 RESTORE TABLE test FROM Disk('backups', 'test_s3_backup');
 " &>/dev/null
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 SELECT * FROM test;
 SELECT empty FROM test;
 "


@@ -1,2 +1,2 @@
-default 127.0.0.1 9181 0 0 0 1 1 ['FILTERED_LIST','MULTI_READ','CHECK_NOT_EXISTS','CREATE_IF_NOT_EXISTS']
+default 127.0.0.1 9181 0 0 0 1 1 ['FILTERED_LIST','MULTI_READ','CHECK_NOT_EXISTS','CREATE_IF_NOT_EXISTS','REMOVE_RECURSIVE']
 zookeeper2 localhost 9181 0 0 0 1
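This .reference change expects the Keeper feature-flag listing to now include REMOVE_RECURSIVE, matching the removeRecursive change enabled in CI elsewhere in this comparison. The columns look like the output of a query over system.zookeeper_connection; a hedged sketch of how such a listing could be reproduced (the column set may differ between server versions):

# Assumed sketch; verify the column names against your server version.
clickhouse-client -q "
    SELECT name, host, port, enabled_feature_flags
    FROM system.zookeeper_connection
    FORMAT TSV;
"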


@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
 CREATE TEMPORARY TABLE IF NOT EXISTS aboba
 (
 user_id UInt32,


@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 DROP TABLE IF EXISTS test_s3;
 CREATE TABLE test_s3 (a UInt64, b UInt64)
@@ -17,7 +17,7 @@ INSERT INTO test_s3 SELECT number, number FROM numbers(1000000);
 query="SELECT sum(b) FROM test_s3 WHERE a >= 100000 AND a <= 102000"
 query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1)
 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 SELECT
 ProfileEvents['S3ReadRequestsCount'],
 ProfileEvents['ReadBufferFromS3Bytes'],


@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree()
@@ -22,7 +22,7 @@ INSERT INTO test SELECT number, randomString(100) FROM numbers(1000000);
 "
 QUERY_ID=$RANDOM
-$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -nm -q "
+$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -m -q "
 SET enable_filesystem_cache_log = 1;
 SYSTEM DROP FILESYSTEM CACHE;
 SELECT * FROM test WHERE NOT ignore() LIMIT 1 FORMAT Null;
@@ -49,14 +49,14 @@ WHERE query_id = '$QUERY_ID' "
 # File segments cannot be less that 20Mi,
 # except for last file segment in a file or if file size is less.
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query)
 WHERE file_segment_size < file_size
 AND end_offset + 1 != file_size
 AND file_segment_size < 20 * 1024 * 1024;
 "
-all=$($CLICKHOUSE_CLIENT -nm -q "
+all=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query)
 WHERE file_segment_size < file_size AND end_offset + 1 != file_size;
 ")
@@ -68,7 +68,7 @@ else
 echo "FAIL"
 fi
-count=$($CLICKHOUSE_CLIENT -nm -q "
+count=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query)
 WHERE file_segment_size < file_size
 AND end_offset + 1 != file_size
@@ -87,21 +87,21 @@ FROM (SELECT * FROM ($query)) AS cache_log
 INNER JOIN system.filesystem_cache AS cache
 ON cache_log.cache_path = cache.cache_path "
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
 AND file_segment_range_end + 1 != file_size
 AND downloaded_size < 20 * 1024 * 1024;
 "
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
 AND file_segment_range_end + 1 != file_size
 AND formatReadableSize(downloaded_size) not in ('20.00 MiB', '40.00 MiB');
 "
-all=$($CLICKHOUSE_CLIENT -nm -q "
+all=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_size < file_size AND file_segment_range_end + 1 != file_size;
 ")
@@ -112,7 +112,7 @@ else
 echo "FAIL"
 fi
-count2=$($CLICKHOUSE_CLIENT -nm -q "
+count2=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
 AND file_segment_range_end + 1 != file_size


@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 DROP TABLE IF EXISTS test_s3;
 CREATE TABLE test_s3 (a UInt64, b UInt64)
@@ -25,7 +25,7 @@ do
 query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1)
 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-RES=$(${CLICKHOUSE_CLIENT} -nm --query "
+RES=$(${CLICKHOUSE_CLIENT} -m --query "
 SELECT ProfileEvents['DiskConnectionsPreserved'] > 0
 FROM system.query_log
 WHERE type = 'QueryFinish'
@@ -41,7 +41,7 @@ done
 while true
 do
-query_id=$(${CLICKHOUSE_CLIENT} -nq "
+query_id=$(${CLICKHOUSE_CLIENT} -q "
 create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n;
 set insert_keeper_fault_injection_probability=0;
 insert into mut values (1, 2, 3), (10, 20, 30);
@@ -60,7 +60,7 @@ do
 ) limit 1 settings max_threads=1;
 " 2>&1)
 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-RES=$(${CLICKHOUSE_CLIENT} -nm --query "
+RES=$(${CLICKHOUSE_CLIENT} -m --query "
 SELECT ProfileEvents['StorageConnectionsPreserved'] > 0
 FROM system.query_log
 WHERE type = 'QueryFinish'


@@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 set -e
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data;
 create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk';
 insert into data select * from numbers(10);
@@ -16,28 +16,28 @@ $CLICKHOUSE_CLIENT -nm -q "
 query_id=$(random_str 10)
 $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SYSTEM FLUSH LOGS;
 SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
 "
 query_id=$(random_str 10)
 $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SYSTEM FLUSH LOGS;
 SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
 "
 query_id=$(random_str 10)
 $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SYSTEM FLUSH LOGS;
 SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
 "
 query_id=$(random_str 10)
 $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SYSTEM FLUSH LOGS;
 SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
 "


@@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 set -e
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data;
 create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk';
 insert into data select * from numbers(10);


@@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree() ORDER BY tuple()
@@ -17,17 +17,17 @@ SETTINGS disk = disk(name = 's3_disk', type = cache, max_size = '100Ki', path =
 disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}"
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.disks WHERE name = '$disk_name'
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree() ORDER BY tuple()
 SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk);
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.disks WHERE name = '$disk_name'
 """


@@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}"
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree() ORDER BY tuple()
@@ -22,29 +22,29 @@ query_id=$RANDOM
 $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1"
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY kek;
 """ 2>&1 | grep -q "Invalid cache key hex: kek" && echo "OK" || echo "FAIL"
 ${CLICKHOUSE_CLIENT} -q " system flush logs"
-key=$($CLICKHOUSE_CLIENT -nm --query """
+key=$($CLICKHOUSE_CLIENT -m --query """
 SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
 """)
-offset=$($CLICKHOUSE_CLIENT -nm --query """
+offset=$($CLICKHOUSE_CLIENT -m --query """
 SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
 """)
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset;
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key OFFSET $offset;
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset;
 """
@@ -54,18 +54,18 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Nul
 ${CLICKHOUSE_CLIENT} -q " system flush logs"
-key=$($CLICKHOUSE_CLIENT -nm --query """
+key=$($CLICKHOUSE_CLIENT -m --query """
 SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
 """)
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.filesystem_cache WHERE key = '$key';
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.filesystem_cache WHERE key = '$key';
 """


@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS data;
 DROP TABLE IF EXISTS data_1;
 DROP TABLE IF EXISTS data_2;


@@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data;
 create table data (key Int) engine=MergeTree() order by tuple();
 insert into data select * from numbers(10);


@@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data;
 create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, disk='s3_disk';
 -- reading 1e6*8 bytes with 1M bandwith it should take (8-1)/1=7 seconds
@@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 query_id=$(random_str 10)
 $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup2') SETTINGS allow_s3_native_copy=1" --max_backup_bandwidth=1M > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SYSTEM FLUSH LOGS;
 SELECT
 'native_copy',
@@ -26,7 +26,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 query_id=$(random_str 10)
 $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup3') SETTINGS allow_s3_native_copy=0" --max_backup_bandwidth=1M > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SYSTEM FLUSH LOGS;
 SELECT
 'no_native_copy',


@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -mn -q """
+$CLICKHOUSE_CLIENT -m -q """
 DROP TABLE IF EXISTS t1_02867;
 CREATE TABLE t1_02867 (x UInt64) ENGINE=Set();
 """
@@ -39,4 +39,4 @@ repeat_truncate_insert &
 sleep 10
-$CLICKHOUSE_CLIENT -mn -q "DROP TABLE IF EXISTS t1_02867;"
+$CLICKHOUSE_CLIENT -m -q "DROP TABLE IF EXISTS t1_02867;"


@@ -10,14 +10,14 @@ echo '{"a" : 1, "obj" : {"f1" : 1, "f2" : "2020-01-01"}}' > $CLICKHOUSE_TEST_UNI
 echo '{"b" : 2, "obj" : {"f3" : 2, "f2" : "Some string"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.jsonl
 echo '{"c" : "hello"}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl
-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
 set schema_inference_mode = 'union';
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
 select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow;
 select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file;
 "
-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
 set schema_inference_mode = 'union';
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl');
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
@@ -25,14 +25,14 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
 cd $CLICKHOUSE_TEST_UNIQUE_NAME/ && tar -cf archive.tar data1.jsonl data2.jsonl data3.jsonl && cd ..
-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
 set schema_inference_mode = 'union';
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
 select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow;
 select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file;
 "
-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
 set schema_inference_mode = 'union';
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data3.jsonl');
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
@@ -41,7 +41,7 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
 echo 'Error' > $CLICKHOUSE_TEST_UNIQUE_NAME/data4.jsonl
 $CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl') settings schema_inference_mode='union'" 2>&1 | grep -c -F "CANNOT_EXTRACT_TABLE_STRUCTURE"
-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
 set schema_inference_mode = 'union';
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{2,3}.jsonl');
 desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl');


@@ -67,7 +67,7 @@ curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT sum(is_leader
 wait;
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 SYSTEM FLUSH LOGS;
 -- Check that number of ZK request is less then a half of (total replicas * concurrency)


@@ -8,7 +8,7 @@ CURDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists r1;
 drop table if exists r2;
@@ -64,7 +64,7 @@ function insert_duplicates() {
 wait
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 system sync replica r1;
 system sync replica r2;
 "
@@ -84,7 +84,7 @@ function loop()
 do
 while ! insert_duplicates
 do
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 truncate table r1;
 truncate table r2;
 system sync replica r1;
@@ -137,8 +137,8 @@ function list_keeper_nodes() {
 list_keeper_nodes "${table_shared_id}"
-$CLICKHOUSE_CLIENT -nm -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" &
+$CLICKHOUSE_CLIENT -m -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" &
-$CLICKHOUSE_CLIENT -nm -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" &
+$CLICKHOUSE_CLIENT -m -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" &
 wait
 list_keeper_nodes "${table_shared_id}"


@@ -10,11 +10,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`"
 CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`"
-$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
+$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
 # Basic refreshing.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view a
 refresh after 2 second
 engine Memory
@@ -23,41 +23,41 @@ $CLICKHOUSE_CLIENT -nq "
 select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes;
 show create a;"
 # Wait for any refresh. (xargs trims the string and turns \t and \n into spaces)
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ]
 do
 sleep 0.5
 done
-start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`"
+start_time="`$CLICKHOUSE_CLIENT -q "select reinterpret(now64(), 'Int64')"`"
 # Check table contents.
-$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a"
+$CLICKHOUSE_CLIENT -q "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a"
 # Wait for table contents to change.
-res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`"
+res1="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values'`"
 while :
 do
-res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`"
+res2="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`"
 [ "$res2" == "$res1" ] || break
 sleep 0.5
 done
 # Wait for another change.
 while :
 do
-res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`"
+res3="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`"
 [ "$res3" == "$res2" ] || break
 sleep 0.5
 done
 # Check that the two changes were at least 1 second apart, in particular that we're not refreshing
 # like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer
 # to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000);
 select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;"
 # Create a source table from which views will read.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create table src (x Int8) engine Memory as select 1;"
 # Switch to fake clock, change refresh schedule, change query.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 system test view a set fake time '2050-01-01 00:00:01';
 system wait view a;
 system refresh view a;
@@ -68,19 +68,19 @@ $CLICKHOUSE_CLIENT -nq "
 select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes;
 show create a;"
 # Advance time to trigger the refresh.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<5: no refresh>', count() from a;
 system test view a set fake time '2052-02-03 04:05:06';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<6: refreshed>', * from a;
 select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;"
 # Create a dependent view, refresh it once.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a;
 show create b;
 system test view b set fake time '2052-11-11 11:11:11';
@@ -88,89 +88,89 @@ $CLICKHOUSE_CLIENT -nq "
 system wait view b;
 select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';"
 # Next refresh shouldn't start until the dependency refreshes.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<8: refreshed>', * from b;
 select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes;
 system test view b set fake time '2054-01-24 23:22:21';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ]
 do
 sleep 0.5
 done
 # Drop the source table, check that refresh fails and doesn't leave a temp table behind.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();
 drop table src;
 system refresh view a;"
-$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
+$CLICKHOUSE_CLIENT -q "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();"
 # Create the source table again, check that refresh succeeds (in particular that tables are looked
 # up by name rather than uuid).
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes;
 create table src (x Int16) engine Memory as select 2;
 system test view a set fake time '2054-01-01 00:00:01';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ]
 do
 sleep 0.5
 done
 # Both tables should've refreshed.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<11: chain-refreshed a>', * from a;
 select '<12: chain-refreshed b>', * from b;
 select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;"
 # Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to
 # catch up to the same cycle.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 system test view b set fake time '2059-01-01 00:00:00';
 system refresh view b;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 system test view b set fake time '2061-01-01 00:00:00';
 system test view a set fake time '2057-01-01 00:00:00';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes;
 truncate src;
 insert into src values (3);
 system test view a set fake time '2060-02-02 02:02:02';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<15: chain-refreshed a>', * from a;
 select '<16: chain-refreshed b>', * from b;
 select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;"
 # Get to WaitingForDependencies state and remove the depencency.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 system test view b set fake time '2062-03-03 03:03:03'"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 alter table b modify refresh every 2 year"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b';
 show create b;"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 drop table src;
 drop table a;
 drop table b;


@@ -12,29 +12,29 @@ CLICKHOUSE_LOG_COMMENT=
 CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`"
 CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`"
-$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
+$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
 # Select from a table that doesn't exist, get an exception.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create table src (x Int8) engine Memory as select 1;
 create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src;
 drop table src;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ]
 do
 sleep 0.5
 done
 # Check exception, create src, expect successful refresh.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c';
 create table src (x Int64) engine Memory as select 1;
 system refresh view c;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
 do
 sleep 0.5
 done
 # Rename table.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<20: unexception>', * from c;
 rename table c to d;
 select '<21: rename>', * from d;
@@ -42,130 +42,130 @@ $CLICKHOUSE_CLIENT -nq "
 # Do various things during a refresh.
 # First make a nonempty view.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 drop table d;
 truncate src;
 insert into src values (1);
 create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
 do
 sleep 0.5
 done
 # Stop refreshes.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<23: simple refresh>', * from e;
 system stop view e;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ]
 do
 sleep 0.5
 done
 # Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure
 # we wait for a slow refresh, not a previous fast one.)
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 insert into src select * from numbers(1000) settings max_block_size=1;
 system start view e;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Running' ]
 do
 sleep 0.5
 done
 # Rename.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 rename table e to f;
 select '<24: rename during refresh>', * from f;
 select '<25: rename during refresh>', view, status from refreshes where view = 'f';
 alter table f modify refresh after 10 year;"
 # Cancel.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 system cancel view f;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ]
 do
 sleep 0.5
 done
 # Check that another refresh doesn't immediately start after the cancelled one.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f';
 system refresh view f;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ]
 do
 sleep 0.5
 done
 # Drop.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 drop table f;
 select '<28: drop during refresh>', view, status from refreshes;
 select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()"
 # Try OFFSET and RANDOMIZE FOR.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x;
 show create g;
 system test view g set fake time '2050-02-03 15:30:13';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 with '2050-02-10 04:00:00'::DateTime as expected
 select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;"
 # Send data 'TO' an existing table.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 drop table g;
 create table dest (x Int64) engine MergeTree order by x;
 truncate src;
 insert into src values (1);
 create materialized view h refresh every 1 second to dest empty as select x*10 as x from src;
 show create h;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<30: to existing table>', * from dest;
 insert into src values (2);"
-while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select count() from dest -- $LINENO" | xargs`" != '2' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<31: to existing table>', * from dest;
 drop table dest;
 drop table h;"
 # Retries.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;"
-$CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
+$CLICKHOUSE_CLIENT -q "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes;
 create table src2 (x Int8) engine Memory;
 insert into src2 values (1);
 exchange tables src and src2;
 drop table src2;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<31.6: did retry>', x from h2;
 drop table h2"
 # EMPTY
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2);
 create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);"
-while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ]
 do
 sleep 0.5
 done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view;
 drop table i;
 drop table j;"
 # APPEND
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src;
 select '<33: append>', * from k;
 system refresh view k;
@@ -177,7 +177,7 @@ $CLICKHOUSE_CLIENT -nq "
 system wait view k;
 select '<35: append>', * from k order by x;"
 # ALTER to non-APPEND
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 alter table k modify refresh every 10 year;
 system wait view k;
 system refresh view k;
@@ -187,7 +187,7 @@ $CLICKHOUSE_CLIENT -nq "
 truncate table src;"
 # APPEND + TO + regular materialized view reading from it.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create table mid (x Int64) engine MergeTree order by x;
 create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src;
 create materialized view m (x Int64) engine Memory as select x*10 as x from mid;
@@ -204,19 +204,19 @@ $CLICKHOUSE_CLIENT -nq "
 drop table mid;"
 # Failing to create inner table.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2);
 drop table n;"
 # Reading from table that doesn't exist yet.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE }
 create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE }
 create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE }
 create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1;
 drop table o;"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 drop table refreshes;"


@@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
 function test1_insert()
 {
 echo "test1 insert"
-$CH_CLIENT -nmq "insert into test select number, NULL from numbers(3);
+$CH_CLIENT -mq "insert into test select number, NULL from numbers(3);
 insert into test select number + 3, number from numbers(3);
 insert into test select number + 6, ('str_' || toString(number))::Variant(String) from numbers(3);
 insert into test select number + 9, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(3);
@@ -21,7 +21,7 @@ insert into test select number + 15, range(number + 1)::Array(UInt64) from numbe
 function test1_select()
 {
 echo "test1 select"
-$CH_CLIENT -nmq "select v from test order by id;
+$CH_CLIENT -mq "select v from test order by id;
 select v.String from test order by id;
 select v.UInt64 from test order by id;
 select v.\`LowCardinality(String)\` from test order by id;
@@ -36,7 +36,7 @@ select v.\`Array(UInt64)\`.size0 from test order by id;"
 function test2_insert()
 {
 echo "test2 insert"
-$CH_CLIENT -nmq "insert into test select number, NULL from numbers(3);
+$CH_CLIENT -mq "insert into test select number, NULL from numbers(3);
 insert into test select number + 3, number % 2 ? NULL : number from numbers(3);
 insert into test select number + 6, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(3);
 insert into test select number + 9, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(('lc_str_' || toString(number))::LowCardinality(String), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') from numbers(3);
@@ -47,7 +47,7 @@ insert into test select number + 15, number % 2 ? CAST(NULL, 'Variant(String, UI
 function test2_select()
 {
 echo "test2 select"
-$CH_CLIENT -nmq "select v from test order by id;
+$CH_CLIENT -mq "select v from test order by id;
 select v.String from test order by id;
 select v.UInt64 from test order by id;
 select v.\`LowCardinality(String)\` from test order by id;
@@ -68,7 +68,7 @@ function test3_insert()
 function test3_select()
 {
 echo "test3 select"
-$CH_CLIENT -nmq "select v from test order by id;
+$CH_CLIENT -mq "select v from test order by id;
 select v.String from test order by id;
 select v.UInt64 from test order by id;
 select v.\`LowCardinality(String)\` from test order by id;


@@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
 function test4_insert()
 {
 echo "test4 insert"
-$CH_CLIENT -nmq "insert into test select number, NULL from numbers(100000);
+$CH_CLIENT -mq "insert into test select number, NULL from numbers(100000);
 insert into test select number + 100000, number from numbers(100000);
 insert into test select number + 200000, ('str_' || toString(number))::Variant(String) from numbers(100000);
 insert into test select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000);
@@ -21,7 +21,7 @@ insert into test select number + 500000, range(number % 20 + 1)::Array(UInt64) f
 function test4_select
 {
 echo "test4 select"
-$CH_CLIENT -nmq "select v from test format Null;
+$CH_CLIENT -mq "select v from test format Null;
 select count() from test where isNotNull(v);
 select v.String from test format Null;
 select count() from test where isNotNull(v.String);


@@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
 function test5_insert()
 {
 echo "test5 insert"
-$CH_CLIENT -nmq "
+$CH_CLIENT -mq "
 insert into test select number, NULL from numbers(200000);
 insert into test select number + 200000, number % 2 ? NULL : number from numbers(200000);
 insert into test select number + 400000, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(200000);
@@ -22,7 +22,7 @@ insert into test select number + 1000000, number % 2 ? CAST(NULL, 'Variant(Strin
 function test5_select()
 {
 echo "test5 select"
-$CH_CLIENT -nmq "
+$CH_CLIENT -mq "
 select v from test format Null;
 select count() from test where isNotNull(v);
 select v.String from test format Null;


@@ -17,7 +17,7 @@ function test6_insert()
 function test6_select()
 {
 echo "test6 select"
-$CH_CLIENT -nmq "select v from test format Null;
+$CH_CLIENT -mq "select v from test format Null;
 select count() from test where isNotNull(v);
 select v.String from test format Null;
 select count() from test where isNotNull(v.String);


@@ -10,7 +10,7 @@ disk_name="s3_cache_02944"
 $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name';
 INSERT INTO test SELECT randomString(100);
@@ -33,7 +33,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -47,7 +47,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -63,7 +63,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -77,7 +77,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"


@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_LOCAL -nm -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom;
+$CLICKHOUSE_LOCAL -m -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom;
 INSERT INTO test_table SELECT * FROM file('$CURDIR/data_parquet/02960_polygon_bound_bug.parquet', Parquet);
 CREATE DICTIONARY test_dict (geom MultiPolygon) PRIMARY KEY geom SOURCE (CLICKHOUSE(TABLE 'test_table')) LIFETIME(MIN 0 MAX 0) LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1));
 SELECT dictHas(test_dict,(174.84729269276494,-36.99524960275426));"


@@ -24,7 +24,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='error';
 SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must be unique across the policy'
@@ -40,7 +40,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='error';
 SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must cover the range from 1 to N (lowest priority specified) without gaps'


@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 disk_name="02963_remote_read_bug"
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, s String)
@@ -22,7 +22,7 @@ OPTIMIZE TABLE test FINAL;
 query_id=$(random_str 10)
-$CLICKHOUSE_CLIENT -nm --query_id "$query_id" --query "
+$CLICKHOUSE_CLIENT -m --query_id "$query_id" --query "
 WITH RANDOM_SET AS (
 SELECT rand32() % 10000 FROM numbers(100)
 )
@@ -37,7 +37,7 @@ SETTINGS
 merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem = 1, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 1;
 "
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 SYSTEM FLUSH LOGS;
 -- This threshold was determined experimentally - before the fix this ratio had values around 50K


@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;


@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;


@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;


@@ -24,12 +24,12 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array(
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE', auto, 'a UInt64, b String, c Array(UInt64), d Tuple(a UInt64, b String)')"
-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE');
 desc file('$DATA_FILE');
 "
-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE', JSONEachRow);
 desc file('$DATA_FILE');
 "
@@ -39,7 +39,7 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array(
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}')"
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}') settings schema_inference_mode='union'" 2>&1 | grep -c "CANNOT_DETECT_FORMAT"
-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE.2');
 desc file('$DATA_FILE.{1,2}');
 "


@@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n
 CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database}
 CLICKHOUSE_DATABASE="$new_database"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data;
 create table data (key Int) engine=MergeTree() order by key;
 insert into data values (1);
@@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 # suppress output
 $CLICKHOUSE_CLIENT -q "backup table data to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table data;
 attach table data (key Int) engine=MergeTree() order by key
 settings


@@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n
 CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database}
 CLICKHOUSE_DATABASE="$new_database"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data_read;
 drop table if exists data_write;
@@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 # suppress output
 $CLICKHOUSE_CLIENT -q "backup table data_read to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table data_read;
 attach table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key
 settings
@@ -57,7 +57,7 @@ echo "Files before DETACH TABLE"
 # sed to match any part, since in case of fault injection part name may not be all_0_0_0 but all_1_1_0
 clickhouse-disks -C "$config" --disk s3_plain_disk --query "list --recursive $path" | tail -n+2 | sed 's/all_[^_]*_[^_]*_0/all_X_X_X/g'
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 detach table data_read;
 detach table data_write;
 "


@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \


@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \

Some files were not shown because too many files have changed in this diff.