Mirror of https://github.com/ClickHouse/ClickHouse.git
Synced 2024-09-19 08:10:48 +00:00

Compare commits: 33 commits, range 11c82ac904 ... 0eb0830a13

0eb0830a13, d793e06860, 1986fb1418, 20b25566f5, f36408a666, de85f5f251,
85af661b9c, b42c6491e4, 1a4c7b7c61, 14feba8443, 4c4a051d5e, a55cc03973,
37411bf240, a461d20af9, b55d0b54ea, 418ef3f8bc, b420bbf855, 6a7cfd13f7,
baf6aaef1d, 9ca149a487, 042194e3f6, 120e38c72a, 38b5ea9066, fe5e061fff,
f6b965872f, 22c3b71196, 7425d4aa1a, cf12e3924f, cfc931160d, b2c4b771d8,
edf4e09fb2, 07f44fdb89, 2fcbe2465a
.github/actions/debug/action.yml (vendored), 34 changed lines

@@ -4,15 +4,31 @@ description: Prints workflow debug info
 runs:
   using: "composite"
   steps:
-    - name: Print envs
+    - name: Envs, event.json and contexts
       shell: bash
       run: |
-        echo "::group::Envs"
-        env
-        echo "::endgroup::"
-    - name: Print Event.json
-      shell: bash
-      run: |
-        echo "::group::Event.json"
-        python3 -m json.tool "$GITHUB_EVENT_PATH"
-        echo "::endgroup::"
+        echo '::group::Environment variables'
+        env | sort
+        echo '::endgroup::'
+
+        echo '::group::event.json'
+        python3 -m json.tool "$GITHUB_EVENT_PATH"
+        echo '::endgroup::'
+
+        cat << 'EOF'
+        ::group::github context
+        ${{ toJSON(github) }}
+        ::endgroup::
+
+        ::group::env context
+        ${{ toJSON(env) }}
+        ::endgroup::
+
+        ::group::runner context
+        ${{ toJSON(runner) }}
+        ::endgroup::
+
+        ::group::job context
+        ${{ toJSON(job) }}
+        ::endgroup::
+        EOF
.github/workflows/backport_branches.yml (vendored), 2 changed lines

@@ -27,6 +27,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/cherry_pick.yml (vendored), 2 changed lines

@@ -33,6 +33,8 @@ jobs:
           clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cherry pick
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/create_release.yml (vendored), 4 changed lines

@@ -56,13 +56,13 @@ jobs:
       GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
     runs-on: [self-hosted, release-maker]
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Prepare Release Info
         shell: bash
         run: |
.github/workflows/docker_test_images.yml (vendored), 1 changed line

@@ -11,6 +11,7 @@ name: Build docker images
         required: false
         type: boolean
         default: false

 jobs:
   DockerBuildAarch64:
     runs-on: [self-hosted, style-checker-aarch64]
.github/workflows/jepsen.yml (vendored), 7 changed lines

@@ -8,27 +8,28 @@ on:  # yamllint disable-line rule:truthy
   schedule:
     - cron: '0 */6 * * *'
   workflow_dispatch:

 jobs:
   RunConfig:
     runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |
           echo "::group::configure CI run"
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
           echo "::endgroup::"

           echo "::group::CI run configure results"
           python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
           echo "::endgroup::"
.github/workflows/master.yml (vendored), 4 changed lines

@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Merge sync PR
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/merge_queue.yml (vendored), 4 changed lines

@@ -14,14 +14,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cancel PR workflow
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
.github/workflows/nightly.yml (vendored), 4 changed lines

@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |
.github/workflows/pull_request.yml (vendored), 4 changed lines

@@ -25,14 +25,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cancel previous Sync PR workflow
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
.github/workflows/release_branches.yml (vendored), 2 changed lines

@@ -24,6 +24,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/reusable_simple_job.yml (vendored), 4 changed lines

@@ -62,8 +62,6 @@ jobs:
     env:
       GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
@@ -72,6 +70,8 @@ jobs:
           submodules: ${{inputs.submodules}}
           fetch-depth: ${{inputs.checkout_depth}}
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Set build envs
         run: |
           cat >> "$GITHUB_ENV" << 'EOF'
@@ -13,16 +13,17 @@ Here is a complete list of available database engines. Follow the links for more

 - [Atomic](../../engines/database-engines/atomic.md)

-- [MySQL](../../engines/database-engines/mysql.md)
+- [Lazy](../../engines/database-engines/lazy.md)

-- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)
+- [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md)

-- [Lazy](../../engines/database-engines/lazy.md)
+- [MySQL](../../engines/database-engines/mysql.md)

 - [PostgreSQL](../../engines/database-engines/postgresql.md)

+- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)
+
 - [Replicated](../../engines/database-engines/replicated.md)

 - [SQLite](../../engines/database-engines/sqlite.md)

@@ -107,6 +107,10 @@ The vector similarity index currently does not work with per-table, non-default
 [here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml.
 :::

+Vector index creation is known to be slow. To speed the process up, index creation can be parallelized. The maximum number of threads can be
+configured using the server configuration
+setting [max_build_vector_similarity_index_thread_pool_size](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size).
+
 ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary
 tables. ANN indexes are ideally used only with immutable or rarely changed data, i.e. when there are far more read requests than write
 requests.

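For orientation, here is a minimal end-to-end sketch of the feature these docs describe. It assumes a build with the experimental vector similarity index available; the table and column names are illustrative, not taken from the patch:

SET allow_experimental_vector_similarity_index = 1;

CREATE TABLE tab
(
    id UInt64,
    vec Array(Float32),
    INDEX idx_vec vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO tab SELECT number, [randCanonical(), randCanonical(), randCanonical()] FROM numbers(10000);

-- ANN queries take the familiar ORDER BY distance ... LIMIT form:
SELECT id FROM tab ORDER BY L2Distance(vec, [0.1, 0.2, 0.3]) LIMIT 10;

The index build triggered by the INSERT, and by any later merge, is exactly the work the new thread pool parallelizes.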
@@ -491,6 +491,14 @@ Type: Double

 Default: 0.9

+## max_build_vector_similarity_index_thread_pool_size {#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size}
+
+The maximum number of threads to use for building vector indexes. 0 means all cores.
+
+Type: UInt64
+
+Default: 16
+
 ## cgroups_memory_usage_observer_wait_time

 Interval in seconds during which the server's maximum allowed memory consumption is adjusted by the corresponding threshold in cgroups. (see

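A quick way to inspect the effective value at runtime is a sketch like the following; it assumes the system.server_settings table exposed by recent releases:

SELECT name, value, changed
FROM system.server_settings
WHERE name = 'max_build_vector_similarity_index_thread_pool_size';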
@@ -178,6 +178,9 @@
     M(ObjectStorageAzureThreads, "Number of threads in the AzureObjectStorage thread pool.") \
     M(ObjectStorageAzureThreadsActive, "Number of threads in the AzureObjectStorage thread pool running a task.") \
     M(ObjectStorageAzureThreadsScheduled, "Number of queued or active jobs in the AzureObjectStorage thread pool.") \
+    M(BuildVectorSimilarityIndexThreads, "Number of threads in the build vector similarity index thread pool.") \
+    M(BuildVectorSimilarityIndexThreadsActive, "Number of threads in the build vector similarity index thread pool running a task.") \
+    M(BuildVectorSimilarityIndexThreadsScheduled, "Number of queued or active jobs in the build vector similarity index thread pool.") \
     \
     M(DiskPlainRewritableAzureDirectoryMapSize, "Number of local-to-remote path entries in the 'plain_rewritable' in-memory map for AzureObjectStorage.") \
     M(DiskPlainRewritableLocalDirectoryMapSize, "Number of local-to-remote path entries in the 'plain_rewritable' in-memory map for LocalObjectStorage.") \

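While an index build is in flight, the three new pool metrics can be watched with a query such as:

SELECT metric, value
FROM system.metrics
WHERE metric LIKE 'BuildVectorSimilarityIndex%';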
@@ -63,6 +63,7 @@ static struct InitFiu
     REGULAR(keepermap_fail_drop_data) \
     REGULAR(lazy_pipe_fds_fail_close) \
     PAUSEABLE(infinite_sleep) \
+    PAUSEABLE(stop_moving_part_before_swap_with_active) \


 namespace FailPoints

@@ -50,7 +50,7 @@ namespace DB
     M(UInt32, asynchronous_heavy_metrics_update_period_s, 120, "Period in seconds for updating heavy asynchronous metrics.", 0) \
     M(String, default_database, "default", "Default database name.", 0) \
     M(String, tmp_policy, "", "Policy for storage with temporary data.", 0) \
-    M(UInt64, max_temporary_data_on_disk_size, 0, "The maximum amount of storage that could be used for external aggregation, joins or sorting., ", 0) \
+    M(UInt64, max_temporary_data_on_disk_size, 0, "The maximum amount of storage that could be used for external aggregation, joins or sorting.", 0) \
     M(String, temporary_data_in_cache, "", "Cache disk name for temporary data.", 0) \
     M(UInt64, aggregate_function_group_array_max_element_size, 0xFFFFFF, "Max array element size in bytes for groupArray function. This limit is checked at serialization and help to avoid large state size.", 0) \
     M(GroupArrayActionWhenLimitReached, aggregate_function_group_array_action_when_limit_is_reached, GroupArrayActionWhenLimitReached::THROW, "Action to execute when max array element size is exceeded in groupArray: `throw` exception, or `discard` extra values", 0) \
@@ -65,6 +65,7 @@ namespace DB
     M(UInt64, async_insert_threads, 16, "Maximum number of threads to actually parse and insert data in background. Zero means asynchronous mode is disabled", 0) \
     M(Bool, async_insert_queue_flush_on_shutdown, true, "If true queue of asynchronous inserts is flushed on graceful shutdown", 0) \
     M(Bool, ignore_empty_sql_security_in_create_view_query, true, "If true, ClickHouse doesn't write defaults for empty SQL security statement in CREATE VIEW queries. This setting is only necessary for the migration period and will become obsolete in 24.4", 0) \
+    M(UInt64, max_build_vector_similarity_index_thread_pool_size, 16, "The maximum number of threads to use to build vector similarity indexes. 0 means all cores.", 0) \
     \
     /* Database Catalog */ \
     M(UInt64, database_atomic_delay_before_drop_table_sec, 8 * 60, "The delay during which a dropped table can be restored using the UNDROP statement. If DROP TABLE ran with a SYNC modifier, the setting is ignored.", 0) \

@@ -50,13 +50,6 @@ private:
         return executeNonconstant(input);
     }

-    [[maybe_unused]] String toString() const
-    {
-        WriteBufferFromOwnString buf;
-        buf << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() << "\n";
-        return buf.str();
-    }
-
 private:
     ColumnWithTypeAndName executeLiteral(std::string_view literal) const
     {
@@ -231,9 +224,7 @@ public:
             const auto & instruction = instructions[i];
             try
             {
-                // std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl;
                 concat_args[i] = instruction.execute();
-                // std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl;
             }
             catch (const fmt::v9::format_error & e)
             {
@@ -358,7 +349,14 @@ private:

 REGISTER_FUNCTION(Printf)
 {
-    factory.registerFunction<FunctionPrintf>();
+    factory.registerFunction<FunctionPrintf>(
+        FunctionDocumentation{.description=R"(
+The `printf` function formats the given string with the values (strings, integers, floating-points etc.) listed in the arguments, similar to printf function in C++.
+The format string can contain format specifiers starting with `%` character.
+Anything not contained in `%` and the following format specifier is considered literal text and copied verbatim into the output.
+Literal `%` character can be escaped by `%%`.)", .examples{{"sum", "select printf('%%%s %s %d', 'Hello', 'World', 2024);", "%Hello World 2024"}}, .categories{"String"}
+        });
 }

 }

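Usage sketches for the now-documented function. The first line repeats the example embedded in the registration above; the second is an assumed additional specifier, so verify the supported set against the docs:

SELECT printf('%%%s %s %d', 'Hello', 'World', 2024);  -- returns '%Hello World 2024'
SELECT printf('%d rows', 42);                         -- assumed to return '42 rows'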
@@ -10,6 +10,7 @@
 #include <Common/SensitiveDataMasker.h>
 #include <Common/Macros.h>
 #include <Common/EventNotifier.h>
+#include <Common/getNumberOfPhysicalCPUCores.h>
 #include <Common/Stopwatch.h>
 #include <Common/formatReadable.h>
 #include <Common/Throttler.h>
@@ -121,7 +122,6 @@
 #include <Interpreters/InterpreterSelectWithUnionQuery.h>
 #include <base/defines.h>
-

 namespace fs = std::filesystem;

 namespace ProfileEvents
@@ -164,6 +164,9 @@ namespace CurrentMetrics
     extern const Metric TablesLoaderForegroundThreadsActive;
     extern const Metric TablesLoaderForegroundThreadsScheduled;
     extern const Metric IOWriterThreadsScheduled;
+    extern const Metric BuildVectorSimilarityIndexThreads;
+    extern const Metric BuildVectorSimilarityIndexThreadsActive;
+    extern const Metric BuildVectorSimilarityIndexThreadsScheduled;
     extern const Metric AttachedTable;
     extern const Metric AttachedView;
     extern const Metric AttachedDictionary;
@@ -297,6 +300,8 @@ struct ContextSharedPart : boost::noncopyable
     mutable std::unique_ptr<ThreadPool> load_marks_threadpool; /// Threadpool for loading marks cache.
     mutable OnceFlag prefetch_threadpool_initialized;
     mutable std::unique_ptr<ThreadPool> prefetch_threadpool; /// Threadpool for loading marks cache.
+    mutable OnceFlag build_vector_similarity_index_threadpool_initialized;
+    mutable std::unique_ptr<ThreadPool> build_vector_similarity_index_threadpool; /// Threadpool for vector-similarity index creation.
     mutable UncompressedCachePtr index_uncompressed_cache TSA_GUARDED_BY(mutex); /// The cache of decompressed blocks for MergeTree indices.
     mutable QueryCachePtr query_cache TSA_GUARDED_BY(mutex); /// Cache of query results.
     mutable MarkCachePtr index_mark_cache TSA_GUARDED_BY(mutex); /// Cache of marks in compressed files of MergeTree indices.
@@ -3297,6 +3302,21 @@ size_t Context::getPrefetchThreadpoolSize() const
     return config.getUInt(".prefetch_threadpool_pool_size", 100);
 }

+ThreadPool & Context::getBuildVectorSimilarityIndexThreadPool() const
+{
+    callOnce(shared->build_vector_similarity_index_threadpool_initialized, [&] {
+        size_t pool_size = shared->server_settings.max_build_vector_similarity_index_thread_pool_size > 0
+            ? shared->server_settings.max_build_vector_similarity_index_thread_pool_size
+            : getNumberOfPhysicalCPUCores();
+        shared->build_vector_similarity_index_threadpool = std::make_unique<ThreadPool>(
+            CurrentMetrics::BuildVectorSimilarityIndexThreads,
+            CurrentMetrics::BuildVectorSimilarityIndexThreadsActive,
+            CurrentMetrics::BuildVectorSimilarityIndexThreadsScheduled,
+            pool_size);
+    });
+    return *shared->build_vector_similarity_index_threadpool;
+}
+
 BackgroundSchedulePool & Context::getBufferFlushSchedulePool() const
 {
     callOnce(shared->buffer_flush_schedule_pool_initialized, [&] {

@@ -1097,6 +1097,8 @@ public:
     /// and make a prefetch by putting a read task to threadpoolReader.
     size_t getPrefetchThreadpoolSize() const;

+    ThreadPool & getBuildVectorSimilarityIndexThreadPool() const;
+
     /// Settings for MergeTree background tasks stored in config.xml
     BackgroundTaskSchedulingSettings getBackgroundProcessingTaskSchedulingSettings() const;
     BackgroundTaskSchedulingSettings getBackgroundMoveTaskSchedulingSettings() const;

@@ -74,7 +74,8 @@ private:
         findMySQLFunctionSecretArguments();
     }
     else if ((function.name == "s3") || (function.name == "cosn") || (function.name == "oss") ||
-             (function.name == "deltaLake") || (function.name == "hudi") || (function.name == "iceberg"))
+             (function.name == "deltaLake") || (function.name == "hudi") || (function.name == "iceberg") ||
+             (function.name == "gcs"))
     {
         /// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
         findS3FunctionSecretArguments(/* is_cluster_function= */ false);

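With gcs() added to this list, credentials passed to the function should be masked the same way as for s3(). An illustrative check (the URL and keys are placeholders):

SELECT count() FROM gcs('https://storage.googleapis.com/bucket/data.csv', 'key_id', 'secret', 'CSV');

SYSTEM FLUSH LOGS;
-- The secret arguments are expected to show up as '[HIDDEN]' here:
SELECT query FROM system.query_log WHERE query LIKE '%gcs(%' ORDER BY event_time DESC LIMIT 1;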
@@ -5,9 +5,11 @@
 #include <Columns/ColumnArray.h>
 #include <Common/BitHelpers.h>
 #include <Common/formatReadable.h>
+#include <Common/getNumberOfPhysicalCPUCores.h>
 #include <Common/logger_useful.h>
 #include <Common/typeid_cast.h>
 #include <Core/Field.h>
+#include <Core/ServerSettings.h>
 #include <DataTypes/DataTypeArray.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
@@ -29,7 +31,6 @@ namespace DB

 namespace ErrorCodes
 {
-    extern const int CANNOT_ALLOCATE_MEMORY;
     extern const int FORMAT_VERSION_TOO_OLD;
     extern const int ILLEGAL_COLUMN;
     extern const int INCORRECT_DATA;
@@ -131,8 +132,7 @@ void USearchIndexWithSerialization::deserialize(ReadBuffer & istr)
         /// See the comment in MergeTreeIndexGranuleVectorSimilarity::deserializeBinary why we throw here
         throw Exception(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index. Please drop the index and create it again. Error: {}", String(result.error.release()));

-    if (!try_reserve(limits()))
-        throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index");
+    try_reserve(limits());
 }

 USearchIndexWithSerialization::Statistics USearchIndexWithSerialization::getStatistics() const
@@ -270,20 +270,49 @@ void updateImpl(const ColumnArray * column_array, const ColumnArray::Offsets & c
         throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length");

-    /// Reserving space is mandatory
-    if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + rows)))
-        throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index");
+    size_t max_thread_pool_size = Context::getGlobalContextInstance()->getServerSettings().max_build_vector_similarity_index_thread_pool_size;
+    if (max_thread_pool_size == 0)
+        max_thread_pool_size = getNumberOfPhysicalCPUCores();
+    unum::usearch::index_limits_t limits(roundUpToPowerOfTwoOrZero(index->size() + rows), max_thread_pool_size);
+    index->reserve(limits);

-    for (size_t row = 0; row < rows; ++row)
+    /// Vector index creation is slooooow. Add the new rows in parallel. The threadpool is global to avoid oversubscription when multiple
+    /// indexes are build simultaneously (e.g. multiple merges run at the same time).
+    auto & thread_pool = Context::getGlobalContextInstance()->getBuildVectorSimilarityIndexThreadPool();
+
+    auto add_vector_to_index = [&](USearchIndex::vector_key_t key, size_t row, ThreadGroupPtr thread_group)
     {
-        if (auto result = index->add(static_cast<USearchIndex::vector_key_t>(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result)
+        SCOPE_EXIT_SAFE(
+            if (thread_group)
+                CurrentThread::detachFromGroupIfNotDetached();
+        );
+
+        if (thread_group)
+            CurrentThread::attachToGroupIfDetached(thread_group);
+
+        /// add is thread-safe
+        if (auto result = index->add(key, &column_array_data_float_data[column_array_offsets[row - 1]]); !result)
         {
             throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release()));
         }
         else
         {
             ProfileEvents::increment(ProfileEvents::USearchAddCount);
             ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members);
             ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances);
         }
-    }
+    };
+
+    size_t index_size = index->size();
+
+    for (size_t row = 0; row < rows; ++row)
+    {
+        auto key = static_cast<USearchIndex::vector_key_t>(index_size + row);
+        auto task = [group = CurrentThread::getGroup(), &add_vector_to_index, key, row] { add_vector_to_index(key, row, group); };
+        thread_pool.scheduleOrThrowOnError(task);
+    }
+
+    thread_pool.wait();
 }

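To see the parallel build path exercised end to end, a merge can be forced on a table that carries a vector index (a sketch reusing the illustrative table from the docs example above; it also assumes the per-query USearch counters from this file are visible in query_log for the triggering query):

OPTIMIZE TABLE tab FINAL;

SYSTEM FLUSH LOGS;
SELECT ProfileEvents['USearchAddCount'] AS vectors_added
FROM system.query_log
WHERE type = 'QueryFinish' AND query LIKE 'OPTIMIZE TABLE tab%'
ORDER BY event_time DESC
LIMIT 1;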
@@ -1,6 +1,7 @@
 #include <Storages/MergeTree/MergeTreeData.h>
 #include <Storages/MergeTree/MergeTreePartsMover.h>
 #include <Storages/MergeTree/MergeTreeSettings.h>
+#include <Common/FailPoint.h>
 #include <Common/logger_useful.h>

 #include <set>
@@ -15,6 +16,11 @@ namespace ErrorCodes
     extern const int DIRECTORY_ALREADY_EXISTS;
 }

+namespace FailPoints
+{
+    extern const char stop_moving_part_before_swap_with_active[];
+}
+
 namespace
 {

@@ -226,6 +232,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
     cloned_part.temporary_directory_lock = data->getTemporaryPartDirectoryHolder(part->name);

     MutableDataPartStoragePtr cloned_part_storage;
+    bool preserve_blobs = false;
     if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication)
     {
         /// Try zero-copy replication and fallback to default copy if it's not possible
@@ -253,6 +260,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
         if (zero_copy_part)
         {
             /// FIXME for some reason we cannot just use this part, we have to re-create it through MergeTreeDataPartBuilder
+            preserve_blobs = true;
             zero_copy_part->is_temp = false; /// Do not remove it in dtor
             cloned_part_storage = zero_copy_part->getDataPartStoragePtr();
         }
@@ -272,7 +280,17 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
     cloned_part.part = std::move(builder).withPartFormatFromDisk().build();
     LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part.part->getDataPartStorage().getFullPath());

-    cloned_part.part->is_temp = data->allowRemoveStaleMovingParts();
+    cloned_part.part->is_temp = false;
+    if (data->allowRemoveStaleMovingParts())
+    {
+        cloned_part.part->is_temp = true;
+        /// Setting it in case connection to zookeeper is lost while moving
+        /// Otherwise part might be stuck in the moving directory due to the KEEPER_EXCEPTION in part's destructor
+        if (preserve_blobs)
+            cloned_part.part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::PRESERVE_BLOBS;
+        else
+            cloned_part.part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::REMOVE_BLOBS;
+    }
     cloned_part.part->loadColumnsChecksumsIndexes(true, true);
     cloned_part.part->loadVersionMetadata();
     cloned_part.part->modification_time = cloned_part.part->getDataPartStorage().getLastModified().epochTime();
@@ -282,6 +300,8 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me

 void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) const
 {
+    /// Used to get some stuck parts in the moving directory by stopping moves while pause is active
+    FailPointInjection::pauseFailPoint(FailPoints::stop_moving_part_before_swap_with_active);
     if (moves_blocker.isCancelled())
         throw Exception(ErrorCodes::ABORTED, "Cancelled moving parts.");

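The new failpoint is driven from SQL, exactly as the integration test added below does. This pair of statements pauses a move right before the cloned part would be swapped with the active one, and releases it again:

SYSTEM ENABLE FAILPOINT stop_moving_part_before_swap_with_active;
ALTER TABLE test_remove MOVE PART 'all_0_0_0' TO DISK 's3';  -- blocks at the failpoint
-- from a second session, once done inspecting the moving directory:
SYSTEM DISABLE FAILPOINT stop_moving_part_before_swap_with_active;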
@@ -42,6 +42,7 @@
             <multi_read>1</multi_read>
             <check_not_exists>1</check_not_exists>
             <create_if_not_exists>1</create_if_not_exists>
+            <remove_recursive>1</remove_recursive>
         </feature_flags>
     </keeper_server>
 </clickhouse>
@@ -105,7 +105,7 @@ setup_logs_replication

 clickhouse-client --query "SHOW DATABASES"
 clickhouse-client --query "CREATE DATABASE datasets"
-clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
+clickhouse-client < /repo/tests/docker_scripts/create.sql
 clickhouse-client --query "SHOW TABLES FROM datasets"

 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

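This removal, and the many -n / -nm / --multiquery removals below, lean on clickhouse-client now treating multi-statement input as multiquery by default, which makes the flag redundant. For example, both statements run in a single invocation when piped to a plain clickhouse-client:

CREATE DATABASE IF NOT EXISTS datasets;
SHOW TABLES FROM datasets;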
@@ -62,7 +62,7 @@ start_server
 setup_logs_replication

 clickhouse-client --query "CREATE DATABASE datasets"
-clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
+clickhouse-client < /repo/tests/docker_scripts/create.sql
 clickhouse-client --query "SHOW TABLES FROM datasets"

 clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"

@@ -64,6 +64,7 @@ function configure()
         randomize_config_boolean_value multi_read keeper_port
         randomize_config_boolean_value check_not_exists keeper_port
         randomize_config_boolean_value create_if_not_exists keeper_port
+        randomize_config_boolean_value remove_recursive keeper_port
     fi

     sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml

@@ -89,7 +89,6 @@ class Client:
         command = self.command[:]

         if stdin is None:
-            command += ["--multiquery"]
             stdin = sql
         else:
             command += ["--query", sql]

@@ -393,6 +393,7 @@ def test_table_functions():
         f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, connection_string = '{azure_conn_string}', container = 'cont', blob_path = 'test_simple_16.csv', format = 'CSV')",
         f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, storage_account_url = '{azure_storage_account_url}', container = 'cont', blob_path = 'test_simple_17.csv', account_name = '{azure_account_name}', account_key = '{azure_account_key}')",
         f"iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')",
+        f"gcs('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')",
     ]

     def make_test_case(i):

tests/integration/test_remove_stale_moving_parts/config.xml (new file), 46 lines

@@ -0,0 +1,46 @@
<clickhouse>
    <remote_servers>
        <cluster>
            <shard>
                <replica>
                    <host>ch1</host>
                    <port>9000</port>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
    <macros>
        <shard>01</shard>
    </macros>
    <storage_configuration>
        <disks>
            <s3>
                <type>s3</type>
                <endpoint>http://minio1:9001/root/data/</endpoint>
                <access_key_id>minio</access_key_id>
                <secret_access_key>minio123</secret_access_key>
            </s3>
        </disks>
        <policies>
            <s3>
                <volumes>
                    <default>
                        <disk>default</disk>
                        <perform_ttl_move_on_insert>False</perform_ttl_move_on_insert>
                    </default>
                    <s3>
                        <disk>s3</disk>
                        <perform_ttl_move_on_insert>False</perform_ttl_move_on_insert>
                    </s3>
                </volumes>
                <move_factor>0.0</move_factor>
            </s3>
        </policies>
    </storage_configuration>

    <merge_tree>
        <allow_remote_fs_zero_copy_replication>true</allow_remote_fs_zero_copy_replication>
        <storage_policy>s3</storage_policy>
    </merge_tree>
    <allow_remove_stale_moving_parts>true</allow_remove_stale_moving_parts>
</clickhouse>
tests/integration/test_remove_stale_moving_parts/test.py (new file), 117 lines

@@ -0,0 +1,117 @@
from pathlib import Path
import time
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
ch1 = cluster.add_instance(
    "ch1",
    main_configs=[
        "config.xml",
    ],
    macros={"replica": "node1"},
    with_zookeeper=True,
    with_minio=True,
)

DATABASE_NAME = "stale_moving_parts"


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def q(node, query):
    return node.query(database=DATABASE_NAME, sql=query)


# .../disks/s3/store/
def get_table_path(node, table):
    return (
        node.query(
            sql=f"SELECT data_paths FROM system.tables WHERE table = '{table}' and database = '{DATABASE_NAME}' LIMIT 1"
        )
        .strip('"\n[]')
        .split(",")[1]
        .strip("'")
    )


def exec(node, cmd, path):
    return node.exec_in_container(
        [
            "bash",
            "-c",
            f"{cmd} {path}",
        ]
    )


def wait_part_is_stuck(node, table_moving_path, moving_part):
    num_tries = 5
    while q(node, "SELECT part_name FROM system.moves").strip() != moving_part:
        if num_tries == 0:
            raise Exception("Part has not started to move")
        num_tries -= 1
        time.sleep(1)
    num_tries = 5
    while exec(node, "ls", table_moving_path).strip() != moving_part:
        if num_tries == 0:
            raise Exception("Part is not stuck in the moving directory")
        num_tries -= 1
        time.sleep(1)


def wait_zookeeper_node_to_start(zk_nodes, timeout=60):
    start = time.time()
    while time.time() - start < timeout:
        try:
            for instance in zk_nodes:
                conn = cluster.get_kazoo_client(instance)
                conn.get_children("/")
            print("All instances of ZooKeeper started")
            return
        except Exception as ex:
            print(("Can't connect to ZooKeeper " + str(ex)))
            time.sleep(0.5)


def test_remove_stale_moving_parts_without_zookeeper(started_cluster):
    ch1.query(f"CREATE DATABASE IF NOT EXISTS {DATABASE_NAME}")

    q(
        ch1,
        "CREATE TABLE test_remove ON CLUSTER cluster ( id UInt32 ) ENGINE ReplicatedMergeTree() ORDER BY id;",
    )

    table_moving_path = Path(get_table_path(ch1, "test_remove")) / "moving"

    q(ch1, "SYSTEM ENABLE FAILPOINT stop_moving_part_before_swap_with_active")
    q(ch1, "INSERT INTO test_remove SELECT number FROM numbers(100);")
    moving_part = "all_0_0_0"
    move_response = ch1.get_query_request(
        sql=f"ALTER TABLE test_remove MOVE PART '{moving_part}' TO DISK 's3'",
        database=DATABASE_NAME,
    )

    wait_part_is_stuck(ch1, table_moving_path, moving_part)

    cluster.stop_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
    # Stop moves in case table is not read-only yet
    q(ch1, "SYSTEM STOP MOVES")
    q(ch1, "SYSTEM DISABLE FAILPOINT stop_moving_part_before_swap_with_active")

    assert "Cancelled moving parts" in move_response.get_error()
    assert exec(ch1, "ls", table_moving_path).strip() == ""

    cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
    wait_zookeeper_node_to_start(["zoo1", "zoo2", "zoo3"])
    q(ch1, "SYSTEM START MOVES")

    q(ch1, f"DROP TABLE test_remove")
@@ -427,7 +427,7 @@ do
 done

 # for each query run, prepare array of metrics from query log
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
     'test text, query_index int, query_id text, version UInt8, time float');

@@ -582,7 +582,7 @@ numactl --cpunodebind=all --membind=all numactl --show
 # If the available memory falls below 2 * size, GNU parallel will suspend some of the running jobs.
 numactl --cpunodebind=all --membind=all parallel -v --joblog analyze/parallel-log.txt --memsuspend 15G --null < analyze/commands.txt 2>> analyze/errors.log

-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 -- Join the metric names back to the metric statistics we've calculated, and make
 -- a denormalized table of them -- statistics for all metrics for all queries.
 -- The WITH, ARRAY JOIN and CROSS JOIN do not like each other:
@@ -680,7 +680,7 @@ rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.ts
 cat analyze/errors.log >> report/errors.log ||:
 cat profile-errors.log >> report/errors.log ||:

-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view query_display_names as select * from
     file('analyze/query-display-names.tsv', TSV,
         'test text, query_index int, query_display_name text')
@@ -981,7 +981,7 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts
 for version in {right,left}
 do
     rm -rf data
-    clickhouse-local --multiquery --query "
+    clickhouse-local --query "
     create view query_profiles as
         with 0 as left, 1 as right
         select * from file('analyze/query-profiles.tsv', TSV,
@@ -1151,7 +1151,7 @@ function report_metrics
 rm -rf metrics ||:
 mkdir metrics

-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view right_async_metric_log as
     select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes)
     ;
@@ -1211,7 +1211,7 @@ function upload_results
 # Prepare info for the CI checks table.
 rm -f ci-checks.tsv

-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view queries as select * from file('report/queries.tsv', TSVWithNamesAndTypes);

 create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')

@@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'
+$CLICKHOUSE_CLIENT -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'

@@ -12,14 +12,14 @@ echo "
 DROP TABLE IF EXISTS rocksdb_race;
 CREATE TABLE rocksdb_race (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key);
 INSERT INTO rocksdb_race SELECT '1_' || toString(number), number FROM numbers(100000);
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT

 function read_stat_thread()
 {
     while true; do
         echo "
             SELECT * FROM system.rocksdb FORMAT Null;
-        " | $CLICKHOUSE_CLIENT -n
+        " | $CLICKHOUSE_CLIENT
     done
 }

@@ -29,7 +29,7 @@ function truncate_thread()
         sleep 3s;
         echo "
             TRUNCATE TABLE rocksdb_race;
-        " | $CLICKHOUSE_CLIENT -n
+        " | $CLICKHOUSE_CLIENT
     done
 }

@@ -12,7 +12,7 @@ opts=(
     --join_algorithm='parallel_hash'
 )

-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 CREATE TABLE t1(a UInt32, b UInt32) ENGINE=MergeTree ORDER BY ();
 INSERT INTO t1 SELECT number, number FROM numbers_mt(1e6);

@@ -5,12 +5,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

-timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -nm -q "
+timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -m -q "
   SELECT sleep(1) FROM numbers(100) FORMAT Null;
   SELECT 'FAIL';
 "

-timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -nm -q "
+timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -m -q "
   SELECT sleep(1) FROM numbers(100) FORMAT Null;
   SELECT 'FAIL';
 "

@@ -16,7 +16,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do

     $CLICKHOUSE_CLIENT --echo --query "SYSTEM DROP FILESYSTEM CACHE"

-    $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+    $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
     FROM
     (
         SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -37,7 +37,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do

     $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)"

-    $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+    $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
     FROM
     (
         SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -70,7 +70,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do

     $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)"

-    $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+    $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
     FROM
     (
         SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -109,7 +109,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do

     $CLICKHOUSE_CLIENT --echo --query "SYSTEM FLUSH LOGS"

-    $CLICKHOUSE_CLIENT -n --query "SELECT
+    $CLICKHOUSE_CLIENT --query "SELECT
         query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read
     FROM
         system.query_log

@@ -15,7 +15,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP ROLE IF EXISTS test_role_02242;
 CREATE ROLE test_role_02242;
 "

@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP ROLE IF EXISTS test_role_02244;
 CREATE ROLE test_role_02244;
 DROP USER IF EXISTS kek_02243;
@@ -37,4 +37,4 @@ $CLICKHOUSE_CLIENT --user kek_02243 -q "SELECT * FROM test" 2>&1| grep -Fa "Exce

 $CLICKHOUSE_CLIENT -q "DROP ROLE IF EXISTS test_role_02243"
 $CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS test_user_02243"
-$CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS kek_02243"
+$CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS kek_02243"

@@ -44,7 +44,7 @@ protobuf_info() {
     fi
 }

-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS $MAIN_TABLE;
 DROP TABLE IF EXISTS $ROUNDTRIP_TABLE;
 DROP TABLE IF EXISTS $COMPATIBILITY_TABLE;
@@ -78,14 +78,14 @@ echo $SET_OUTPUT

 echo
 echo "Insert $INITIAL_INSERT_VALUES into table (Nullable(String), Int32):"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 INSERT INTO $MAIN_TABLE VALUES $INITIAL_INSERT_VALUES;
 SELECT * FROM $MAIN_TABLE;
 "

 echo
 echo "Protobuf representation of the second row:"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH"
 hexdump -C $BINARY_FILE_PATH

 echo
@@ -101,12 +101,12 @@ hexdump -C $MESSAGE_FILE_PATH

 echo
 echo "Insert proto message into table (Nullable(String), Int32):"
-$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH"
 $CLICKHOUSE_CLIENT --query "SELECT * FROM $ROUNDTRIP_TABLE"

 echo
 echo "Proto output of the table using Google wrapper:"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH"
 hexdump -C $BINARY_FILE_PATH

 echo
@@ -124,14 +124,14 @@ echo
 echo "Insert $MULTI_WRAPPER_VALUES and reinsert using Google wrappers into:"
 echo "Table (Nullable(Int32), Nullable(Int32), Int32):"
 $CLICKHOUSE_CLIENT --query "INSERT INTO $MULTI_TABLE VALUES $MULTI_WRAPPER_VALUES"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH"
-$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH"
 $CLICKHOUSE_CLIENT --query "SELECT * FROM $MULTI_TABLE"

 rm "$BINARY_FILE_PATH"
 rm "$MESSAGE_FILE_PATH"

-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE $MAIN_TABLE;
 DROP TABLE $ROUNDTRIP_TABLE;
 DROP TABLE $COMPATIBILITY_TABLE;

@@ -11,7 +11,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     echo "Using storage policy: $STORAGE_POLICY"
     $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02286"

-    $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_02286 (key UInt32, value String)
+    $CLICKHOUSE_CLIENT --query "CREATE TABLE test_02286 (key UInt32, value String)
     Engine=MergeTree()
     ORDER BY key
     SETTINGS storage_policy='$STORAGE_POLICY', min_bytes_for_wide_part = 10485760"
@@ -38,7 +38,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02286 FORMAT Null"
     $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"

-    $CLICKHOUSE_CLIENT -n --query "SELECT count()
+    $CLICKHOUSE_CLIENT --query "SELECT count()
     FROM (
         SELECT
             arrayJoin(cache_paths) AS cache_path,
@@ -54,7 +54,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"

     $CLICKHOUSE_CLIENT --query "SELECT cache_path FROM system.filesystem_cache"
-    $CLICKHOUSE_CLIENT -n --query "SELECT cache_path, local_path
+    $CLICKHOUSE_CLIENT --query "SELECT cache_path, local_path
     FROM (
         SELECT
             arrayJoin(cache_paths) AS cache_path,

@@ -23,7 +23,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d
     ORDER BY tuple()
     SETTINGS storage_policy = '$STORAGE_POLICY'" > /dev/null

-    $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 -n --query "INSERT INTO test_02313
+    $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02313
     SELECT * FROM
         generateRandom('id Int32, val String')
     LIMIT 100000"

@@ -9,7 +9,7 @@ function check_refcnt_for_table()
 {
     local table=$1 && shift

-    $CLICKHOUSE_CLIENT -nm -q "
+    $CLICKHOUSE_CLIENT -m -q "
         system stop merges $table;
         -- cleanup thread may hold the parts lock
         system stop cleanup $table;
@@ -66,14 +66,14 @@ function check_refcnt_for_table()

 # NOTE: index_granularity=1 to cancel ASAP

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
     drop table if exists data_02340;
     create table data_02340 (key Int, part Int) engine=MergeTree() partition by part order by key settings index_granularity=1;
 " || exit 1
 check_refcnt_for_table data_02340
 $CLICKHOUSE_CLIENT -q "drop table data_02340 sync"

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
     drop table if exists data_02340_rep sync;
     create table data_02340_rep (key Int, part Int) engine=ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') partition by part order by key settings index_granularity=1;
 " || exit 1

@@ -7,14 +7,14 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

 disk_name="02344_describe_cache_test"

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree() ORDER BY tuple()
 SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk', load_metadata_asynchronously = 0);
 """

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.disks WHERE name = '$disk_name'
 """

@@ -24,7 +24,7 @@ function wait_query_by_id_started()
     # wait for query to be started
     while [ "$($CLICKHOUSE_CLIENT "$@" -q "select count() from system.processes where query_id = '$query_id'")" -ne 1 ]; do
         if [ "$(
-            $CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -nm -q "
+            $CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -m -q "
                 system flush logs;

                 select count() from system.query_log
@@ -52,7 +52,7 @@ $CLICKHOUSE_CLIENT -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_ordinary Engine=Or
 # debug build on CI, so if this will happen, then DROP query will be
 # finished instantly, and to avoid flakiness we will retry in this case
 while :; do
-    $CLICKHOUSE_CLIENT -nm -q "
+    $CLICKHOUSE_CLIENT -m -q "
         DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}_ordinary.data_02352;
         CREATE TABLE ${CLICKHOUSE_DATABASE}_ordinary.data_02352 (key Int) Engine=Null();
     "

@@ -9,13 +9,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "ATTACH TABLE mv" |& {
+$CLICKHOUSE_CLIENT -m -q "ATTACH TABLE mv" |& {
     # CANNOT_GET_CREATE_TABLE_QUERY -- ATTACH TABLE IF EXISTS
     # TABLE_ALREADY_EXISTS -- ATTACH TABLE IF NOT EXISTS
     grep -F -m1 Exception | grep -v -e CANNOT_GET_CREATE_TABLE_QUERY -e TABLE_ALREADY_EXISTS
 }

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS null;
 CREATE TABLE null (key Int) ENGINE = Null;
 DROP TABLE IF EXISTS mv;

@@ -560,7 +560,6 @@ positionCaseInsensitive
 positionCaseInsensitiveUTF8
 positionUTF8
 pow
-printf
 proportionsZTest
 protocol
 queryID

@@ -27,7 +27,7 @@ function insert()

 function check_span()
 {
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
     SYSTEM FLUSH LOGS;

     SELECT operation_name,
@@ -50,7 +50,7 @@ ${CLICKHOUSE_CLIENT} -nq "
 # $2 - value of distributed_foreground_insert
 function check_span_kind()
 {
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
     SYSTEM FLUSH LOGS;

     SELECT count()
@@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} -nq "
 #
 # Prepare tables for tests
 #
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.dist_opentelemetry;
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.local_opentelemetry;

@@ -122,7 +122,7 @@ check_span_kind $trace_id 'CLIENT'
 #
 # Cleanup
 #
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 DROP TABLE ${CLICKHOUSE_DATABASE}.dist_opentelemetry;
 DROP TABLE ${CLICKHOUSE_DATABASE}.local_opentelemetry;
 "

@@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS 02419_test SYNC;"

 test_primary_key()
 {
-    $CLICKHOUSE_CLIENT -nm -q "
+    $CLICKHOUSE_CLIENT -m -q "
         CREATE TABLE 02419_test (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test2418', 3) PRIMARY KEY($1);
         INSERT INTO 02419_test VALUES (1, 1.1), (2, 2.2);
         SELECT value FROM 02419_test WHERE key = 1;

@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/replication.lib


-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE IF EXISTS alter_table0;
 DROP TABLE IF EXISTS alter_table1;

@@ -21,7 +21,7 @@ wait_for_number_of_parts() {
     echo "$res"
 }

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE IF EXISTS test_without_merge;
 DROP TABLE IF EXISTS test_with_merge;

@@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;"

 wait_for_number_of_parts 'test_without_merge' 1 10

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_without_merge;

 SELECT 'With merge any part range';
@@ -47,7 +47,7 @@ INSERT INTO test_with_merge SELECT 3;"

 wait_for_number_of_parts 'test_with_merge' 1 100

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_with_merge;

 SELECT 'With merge partition only';
@@ -60,7 +60,7 @@ INSERT INTO test_with_merge SELECT 3;"

 wait_for_number_of_parts 'test_with_merge' 1 100

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one
 SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active;

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 -- Limit S3 PUT request per second rate
 SET s3_max_put_rps = 2;
 SET s3_max_put_burst = 1;

@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS wikistat1 SYNC;
 DROP TABLE IF EXISTS wikistat2 SYNC;
 "
@@ -60,7 +60,7 @@ wait
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat1 WHERE NOT ignore(*)"
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat2 WHERE NOT ignore(*)"

-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS wikistat1 SYNC;
 DROP TABLE IF EXISTS wikistat2 SYNC;
 "

@@ -11,7 +11,7 @@ cp $CURDIR/data_ua_parser/os.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
 cp $CURDIR/data_ua_parser/browser.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
 cp $CURDIR/data_ua_parser/device.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_os;
 drop dictionary if exists regexp_browser;
 drop dictionary if exists regexp_device;
@@ -61,10 +61,10 @@ create table user_agents
 Engine = Log();
 "

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 insert into user_agents select ua from input('ua String') FORMAT LineAsString" < $CURDIR/data_ua_parser/useragents.txt

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 select ua, device,
     concat(tupleElement(browser, 1), ' ', tupleElement(browser, 2), '.', tupleElement(browser, 3)) as browser ,
     concat(tupleElement(os, 1), ' ', tupleElement(os, 2), '.', tupleElement(os, 3), '.', tupleElement(os, 4)) as os
@@ -74,7 +74,7 @@ from (
     dictGet('regexp_device', 'device_replacement', ua) device from user_agents) order by ua;
 "

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_os;
 drop dictionary if exists regexp_browser;
 drop dictionary if exists regexp_device;

@@ -27,7 +27,7 @@ cat > "$yaml" <<EOL
version: '10'
EOL

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict1;
create dictionary regexp_dict1
(

@@ -69,7 +69,7 @@ cat > "$yaml" <<EOL
lucky: 'abcde'
EOL

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
system reload dictionary regexp_dict1; -- { serverError 489 }
"

@@ -79,7 +79,7 @@ cat > "$yaml" <<EOL
version: '\1'
EOL

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
system reload dictionary regexp_dict1; -- { serverError 318 }
"

@@ -92,7 +92,7 @@ cat > "$yaml" <<EOL
version: '\2.\3'
EOL

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
system reload dictionary regexp_dict1;
select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+');
select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/0.0.1 Safari/534.8+');

@@ -107,7 +107,7 @@ cat > "$yaml" <<EOL
col_array: '[1,2,3,-1,-2,-3]'
EOL

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
create dictionary regexp_dict2
(
regexp String,

@@ -147,7 +147,7 @@ cat > "$yaml" <<EOL
EOL

# dictGetAll
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict3;
create dictionary regexp_dict3
(

@@ -192,7 +192,7 @@ cat > "$yaml" <<EOL
tag: 'Documentation'
EOL

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict3;
create dictionary regexp_dict3
(

@@ -252,7 +252,7 @@ cat > "$yaml" <<EOL
pattern: '(?-i)hello.*world'
EOL

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
drop dictionary if exists regexp_dict4;
create dictionary regexp_dict4
(

@@ -291,7 +291,7 @@ select dictGetAll('regexp_dict4', 'pattern', 'HELLO WORLD');
select dictGetAll('regexp_dict4', 'pattern', 'HELLO\nWORLD');
"

-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
drop dictionary regexp_dict1;
drop dictionary regexp_dict2;
drop dictionary regexp_dict3;
@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Check that if the background cleanup thread works correctly.
CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}"

-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
DROP TABLE IF EXISTS t_async_insert_cleanup SYNC;
CREATE TABLE t_async_insert_cleanup (
KeyID UInt32

@@ -27,7 +27,7 @@ old_answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper W
for i in {1..300}; do
answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'")
if [ $answer == '10' ]; then
-$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
+$CLICKHOUSE_CLIENT --query "DROP TABLE t_async_insert_cleanup SYNC;"
exit 0
fi
sleep 1

@@ -36,4 +36,4 @@ done
$CLICKHOUSE_CLIENT --query "SELECT count(*) FROM t_async_insert_cleanup"
echo $old_answer
$CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'"
-$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
+$CLICKHOUSE_CLIENT --query "DROP TABLE t_async_insert_cleanup SYNC;"
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
QUERY_ID="${CLICKHOUSE_DATABASE}_test_02585_query_to_kill_id_1"

-$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -n -q "
+$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -q "
create temporary table tmp as select * from numbers(100000000);
select * from remote('127.0.0.2', 'system.numbers_mt') where number in (select * from tmp);" &> /dev/null &
@@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
function get_query_id() { random_str 10; }

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
drop table if exists buf;
drop table if exists dist;
drop table if exists data;

@@ -31,7 +31,7 @@ query_id="$(get_query_id)"
# test, since we care about the difference between NOW() and there should
# not be any significant difference.
$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
system flush logs;
select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
"

@@ -42,25 +42,25 @@ query_id="$(get_query_id)"
# this query (and all subsequent) should reuse the previous connection (at least most of the time)
$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"

-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
system flush logs;
select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
"

echo "INSERT"
query_id="$(get_query_id)"
-$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -nm -q "
+$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -m -q "
insert into dist_dist values (1),(2);
select * from data;
"

sleep 1
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist_dist"
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist_dist"
sleep 1
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist"
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist"

echo "CHECK"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
select * from data order by key;
system flush logs;
select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

echo "INSERT TO S3"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1;
" 2>&1 | $CLICKHOUSE_LOCAL -q "
WITH '(\\w+): (\\d+)' AS pattern,

@@ -30,7 +30,7 @@ SELECT * FROM (
"

echo "CHECK WITH query_log"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
SYSTEM FLUSH LOGS;
SELECT type,
'S3CreateMultipartUpload', ProfileEvents['S3CreateMultipartUpload'],

@@ -45,7 +45,7 @@ ORDER BY query_start_time DESC;
"

echo "CREATE"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS times;
CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t
SETTINGS

@@ -56,29 +56,29 @@ CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t
"

echo "INSERT"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0;
" 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '

echo "READ"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
" 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '

echo "INSERT and READ INSERT"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0;
SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0;
" 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '

echo "DROP"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
DROP TABLE times;
"

echo "CHECK with query_log"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
SYSTEM FLUSH LOGS;
SELECT type,
query,
@@ -21,7 +21,7 @@ wait_for_number_of_parts() {
echo "$res"
}

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
DROP TABLE IF EXISTS test_without_merge;
DROP TABLE IF EXISTS test_replicated;

@@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;"
wait_for_number_of_parts 'test_without_merge' 1 10

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
DROP TABLE test_without_merge;

SELECT 'With merge replicated any part range';

@@ -47,7 +47,7 @@ INSERT INTO test_replicated SELECT 3;"
wait_for_number_of_parts 'test_replicated' 1 100

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
DROP TABLE test_replicated;

SELECT 'With merge replicated partition only';

@@ -60,7 +60,7 @@ INSERT INTO test_replicated SELECT 3;"
wait_for_number_of_parts 'test_replicated' 1 100

-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one
SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_replicated' AND active;
@@ -9,6 +9,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
echo "
DROP USER IF EXISTS postgresql_user;
CREATE USER postgresql_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password;
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT

psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} ${CLICKHOUSE_DATABASE} --user postgresql_user -c "SELECT 1.23::Decimal256(70) AS test;"
@@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# too slow with this.
#
# Unfortunately, the test has to buffer it in memory.
-$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q "
+$CLICKHOUSE_CLIENT --max_memory_usage 16G -m -q "
INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV')
SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024)
SETTINGS s3_max_single_part_upload_size = '5Gi';
@@ -10,7 +10,7 @@ set -e
NUM_REPLICAS=5

for i in $(seq 1 $NUM_REPLICAS); do
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS r$i SYNC;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x SETTINGS replicated_deduplication_window = 1, allow_remote_fs_zero_copy_replication = 1;
"
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
for DISK in s3_disk s3_cache
do
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
DROP TABLE IF EXISTS test;
CREATE TABLE test (id Int32, empty Array(Int32))
ENGINE=MergeTree ORDER BY id

@@ -17,13 +17,13 @@ do
SELECT * FROM test;
"

-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
BACKUP TABLE test TO Disk('backups', 'test_s3_backup');
DROP TABLE test;
RESTORE TABLE test FROM Disk('backups', 'test_s3_backup');
" &>/dev/null

-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
SELECT * FROM test;
SELECT empty FROM test;
"
@@ -1,2 +1,2 @@
-default 127.0.0.1 9181 0 0 0 1 1 ['FILTERED_LIST','MULTI_READ','CHECK_NOT_EXISTS','CREATE_IF_NOT_EXISTS']
+default 127.0.0.1 9181 0 0 0 1 1 ['FILTERED_LIST','MULTI_READ','CHECK_NOT_EXISTS','CREATE_IF_NOT_EXISTS','REMOVE_RECURSIVE']
zookeeper2 localhost 9181 0 0 0 1
@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
CREATE TEMPORARY TABLE IF NOT EXISTS aboba
(
user_id UInt32,
@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE IF EXISTS test_s3;

CREATE TABLE test_s3 (a UInt64, b UInt64)

@@ -17,7 +17,7 @@ INSERT INTO test_s3 SELECT number, number FROM numbers(1000000);
query="SELECT sum(b) FROM test_s3 WHERE a >= 100000 AND a <= 102000"
query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1)
${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
SELECT
ProfileEvents['S3ReadRequestsCount'],
ProfileEvents['ReadBufferFromS3Bytes'],
@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree()

@@ -22,7 +22,7 @@ INSERT INTO test SELECT number, randomString(100) FROM numbers(1000000);
"

QUERY_ID=$RANDOM
-$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -nm -q "
+$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -m -q "
SET enable_filesystem_cache_log = 1;
SYSTEM DROP FILESYSTEM CACHE;
SELECT * FROM test WHERE NOT ignore() LIMIT 1 FORMAT Null;

@@ -49,14 +49,14 @@ WHERE query_id = '$QUERY_ID' "

# File segments cannot be less that 20Mi,
# except for last file segment in a file or if file size is less.
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SELECT count() FROM ($query)
WHERE file_segment_size < file_size
AND end_offset + 1 != file_size
AND file_segment_size < 20 * 1024 * 1024;
"

-all=$($CLICKHOUSE_CLIENT -nm -q "
+all=$($CLICKHOUSE_CLIENT -m -q "
SELECT count() FROM ($query)
WHERE file_segment_size < file_size AND end_offset + 1 != file_size;
")

@@ -68,7 +68,7 @@ else
echo "FAIL"
fi

-count=$($CLICKHOUSE_CLIENT -nm -q "
+count=$($CLICKHOUSE_CLIENT -m -q "
SELECT count() FROM ($query)
WHERE file_segment_size < file_size
AND end_offset + 1 != file_size

@@ -87,21 +87,21 @@ FROM (SELECT * FROM ($query)) AS cache_log
INNER JOIN system.filesystem_cache AS cache
ON cache_log.cache_path = cache.cache_path "

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SELECT count() FROM ($query2)
WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
AND file_segment_range_end + 1 != file_size
AND downloaded_size < 20 * 1024 * 1024;
"

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SELECT count() FROM ($query2)
WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
AND file_segment_range_end + 1 != file_size
AND formatReadableSize(downloaded_size) not in ('20.00 MiB', '40.00 MiB');
"

-all=$($CLICKHOUSE_CLIENT -nm -q "
+all=$($CLICKHOUSE_CLIENT -m -q "
SELECT count() FROM ($query2)
WHERE file_segment_size < file_size AND file_segment_range_end + 1 != file_size;
")

@@ -112,7 +112,7 @@ else
echo "FAIL"
fi

-count2=$($CLICKHOUSE_CLIENT -nm -q "
+count2=$($CLICKHOUSE_CLIENT -m -q "
SELECT count() FROM ($query2)
WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
AND file_segment_range_end + 1 != file_size
@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE IF EXISTS test_s3;

CREATE TABLE test_s3 (a UInt64, b UInt64)

@@ -25,7 +25,7 @@ do
query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1)
${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"

-RES=$(${CLICKHOUSE_CLIENT} -nm --query "
+RES=$(${CLICKHOUSE_CLIENT} -m --query "
SELECT ProfileEvents['DiskConnectionsPreserved'] > 0
FROM system.query_log
WHERE type = 'QueryFinish'

@@ -41,7 +41,7 @@ done

while true
do
-query_id=$(${CLICKHOUSE_CLIENT} -nq "
+query_id=$(${CLICKHOUSE_CLIENT} -q "
create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n;
set insert_keeper_fault_injection_probability=0;
insert into mut values (1, 2, 3), (10, 20, 30);

@@ -60,7 +60,7 @@ do
) limit 1 settings max_threads=1;
" 2>&1)
${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-RES=$(${CLICKHOUSE_CLIENT} -nm --query "
+RES=$(${CLICKHOUSE_CLIENT} -m --query "
SELECT ProfileEvents['StorageConnectionsPreserved'] > 0
FROM system.query_log
WHERE type = 'QueryFinish'
@@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk';
insert into data select * from numbers(10);

@@ -16,28 +16,28 @@ $CLICKHOUSE_CLIENT -nm -q "
query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"

query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"

query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"

query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"
@@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk';
insert into data select * from numbers(10);
@@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
DROP TABLE IF EXISTS test;
CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY tuple()

@@ -17,17 +17,17 @@ SETTINGS disk = disk(name = 's3_disk', type = cache, max_size = '100Ki', path =
disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}"

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.disks WHERE name = '$disk_name'
"""

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
DROP TABLE IF EXISTS test;
CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY tuple()
SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk);
"""

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.disks WHERE name = '$disk_name'
"""
@@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}"
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
DROP TABLE IF EXISTS test;
CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY tuple()

@@ -22,29 +22,29 @@ query_id=$RANDOM
$CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1"

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY kek;
""" 2>&1 | grep -q "Invalid cache key hex: kek" && echo "OK" || echo "FAIL"

${CLICKHOUSE_CLIENT} -q " system flush logs"

-key=$($CLICKHOUSE_CLIENT -nm --query """
+key=$($CLICKHOUSE_CLIENT -m --query """
SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
""")

-offset=$($CLICKHOUSE_CLIENT -nm --query """
+offset=$($CLICKHOUSE_CLIENT -m --query """
SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
""")

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset;
"""

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key OFFSET $offset;
"""

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset;
"""

@@ -54,18 +54,18 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Nul
${CLICKHOUSE_CLIENT} -q " system flush logs"

-key=$($CLICKHOUSE_CLIENT -nm --query """
+key=$($CLICKHOUSE_CLIENT -m --query """
SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
""")

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key';
"""

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key
"""

-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key';
"""
@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
DROP TABLE IF EXISTS data;
DROP TABLE IF EXISTS data_1;
DROP TABLE IF EXISTS data_2;
@@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key Int) engine=MergeTree() order by tuple();
insert into data select * from numbers(10);
@@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, disk='s3_disk';
-- reading 1e6*8 bytes with 1M bandwith it should take (8-1)/1=7 seconds

@@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -nm -q "
query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup2') SETTINGS allow_s3_native_copy=1" --max_backup_bandwidth=1M > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT
'native_copy',

@@ -26,7 +26,7 @@ $CLICKHOUSE_CLIENT -nm -q "
query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup3') SETTINGS allow_s3_native_copy=0" --max_backup_bandwidth=1M > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT
'no_native_copy',
@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -mn -q """
+$CLICKHOUSE_CLIENT -m -q """
DROP TABLE IF EXISTS t1_02867;
CREATE TABLE t1_02867 (x UInt64) ENGINE=Set();
"""

@@ -39,4 +39,4 @@ repeat_truncate_insert &
sleep 10

-$CLICKHOUSE_CLIENT -mn -q "DROP TABLE IF EXISTS t1_02867;"
+$CLICKHOUSE_CLIENT -m -q "DROP TABLE IF EXISTS t1_02867;"
@@ -10,14 +10,14 @@ echo '{"a" : 1, "obj" : {"f1" : 1, "f2" : "2020-01-01"}}' > $CLICKHOUSE_TEST_UNI
echo '{"b" : 2, "obj" : {"f3" : 2, "f2" : "Some string"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.jsonl
echo '{"c" : "hello"}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl

-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow;
select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file;
"

-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl');
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');

@@ -25,14 +25,14 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
cd $CLICKHOUSE_TEST_UNIQUE_NAME/ && tar -cf archive.tar data1.jsonl data2.jsonl data3.jsonl && cd ..

-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow;
select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file;
"

-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data3.jsonl');
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');

@@ -41,7 +41,7 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
echo 'Error' > $CLICKHOUSE_TEST_UNIQUE_NAME/data4.jsonl
$CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl') settings schema_inference_mode='union'" 2>&1 | grep -c -F "CANNOT_EXTRACT_TABLE_STRUCTURE"

-$CLICKHOUSE_LOCAL -nm -q "
+$CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{2,3}.jsonl');
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl');
@@ -67,7 +67,7 @@ curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT sum(is_leader
wait;

-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
SYSTEM FLUSH LOGS;

-- Check that number of ZK request is less then a half of (total replicas * concurrency)
@@ -8,7 +8,7 @@ CURDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
drop table if exists r1;
drop table if exists r2;

@@ -64,7 +64,7 @@ function insert_duplicates() {
wait

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
system sync replica r1;
system sync replica r2;
"

@@ -84,7 +84,7 @@ function loop()
do
while ! insert_duplicates
do
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
truncate table r1;
truncate table r2;
system sync replica r1;

@@ -137,8 +137,8 @@ function list_keeper_nodes() {
list_keeper_nodes "${table_shared_id}"

-$CLICKHOUSE_CLIENT -nm -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" &
-$CLICKHOUSE_CLIENT -nm -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" &
+$CLICKHOUSE_CLIENT -m -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" &
+$CLICKHOUSE_CLIENT -m -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" &
wait

list_keeper_nodes "${table_shared_id}"
@@ -10,11 +10,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`"
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`"

-$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
+$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"

# Basic refreshing.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view a
refresh after 2 second
engine Memory

@@ -23,41 +23,41 @@ $CLICKHOUSE_CLIENT -nq "
select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes;
show create a;"
# Wait for any refresh. (xargs trims the string and turns \t and \n into spaces)
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ]
do
sleep 0.5
done
-start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`"
+start_time="`$CLICKHOUSE_CLIENT -q "select reinterpret(now64(), 'Int64')"`"
# Check table contents.
-$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a"
+$CLICKHOUSE_CLIENT -q "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a"
# Wait for table contents to change.
-res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`"
+res1="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values'`"
while :
do
-res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`"
+res2="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`"
[ "$res2" == "$res1" ] || break
sleep 0.5
done
# Wait for another change.
while :
do
-res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`"
+res3="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`"
[ "$res3" == "$res2" ] || break
sleep 0.5
done
# Check that the two changes were at least 1 second apart, in particular that we're not refreshing
# like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer
# to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000);
select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;"

# Create a source table from which views will read.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create table src (x Int8) engine Memory as select 1;"

# Switch to fake clock, change refresh schedule, change query.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
system test view a set fake time '2050-01-01 00:00:01';
system wait view a;
system refresh view a;

@@ -68,19 +68,19 @@ $CLICKHOUSE_CLIENT -nq "
select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes;
show create a;"
# Advance time to trigger the refresh.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<5: no refresh>', count() from a;
system test view a set fake time '2052-02-03 04:05:06';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<6: refreshed>', * from a;
select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;"

# Create a dependent view, refresh it once.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a;
show create b;
system test view b set fake time '2052-11-11 11:11:11';

@@ -88,89 +88,89 @@ $CLICKHOUSE_CLIENT -nq "
system wait view b;
select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';"
# Next refresh shouldn't start until the dependency refreshes.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<8: refreshed>', * from b;
select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes;
system test view b set fake time '2054-01-24 23:22:21';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ]
do
sleep 0.5
done

# Drop the source table, check that refresh fails and doesn't leave a temp table behind.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();
drop table src;
system refresh view a;"
-$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
+$CLICKHOUSE_CLIENT -q "
select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();"

# Create the source table again, check that refresh succeeds (in particular that tables are looked
# up by name rather than uuid).
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes;
create table src (x Int16) engine Memory as select 2;
system test view a set fake time '2054-01-01 00:00:01';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ]
do
sleep 0.5
done
# Both tables should've refreshed.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<11: chain-refreshed a>', * from a;
select '<12: chain-refreshed b>', * from b;
select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;"

# Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to
# catch up to the same cycle.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
system test view b set fake time '2059-01-01 00:00:00';
system refresh view b;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
system test view b set fake time '2061-01-01 00:00:00';
system test view a set fake time '2057-01-01 00:00:00';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ]
do
sleep 0.5
done

-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes;
truncate src;
insert into src values (3);
system test view a set fake time '2060-02-02 02:02:02';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<15: chain-refreshed a>', * from a;
select '<16: chain-refreshed b>', * from b;
select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;"

# Get to WaitingForDependencies state and remove the depencency.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
system test view b set fake time '2062-03-03 03:03:03'"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
alter table b modify refresh every 2 year"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b';
show create b;"

-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
drop table src;
drop table a;
drop table b;
@@ -12,29 +12,29 @@ CLICKHOUSE_LOG_COMMENT=
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`"
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`"

-$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
+$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"

# Select from a table that doesn't exist, get an exception.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create table src (x Int8) engine Memory as select 1;
create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src;
drop table src;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ]
do
sleep 0.5
done
# Check exception, create src, expect successful refresh.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c';
create table src (x Int64) engine Memory as select 1;
system refresh view c;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
do
sleep 0.5
done
# Rename table.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<20: unexception>', * from c;
rename table c to d;
select '<21: rename>', * from d;

@@ -42,130 +42,130 @@ $CLICKHOUSE_CLIENT -nq "
# Do various things during a refresh.
# First make a nonempty view.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
drop table d;
truncate src;
insert into src values (1);
create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
do
sleep 0.5
done
# Stop refreshes.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<23: simple refresh>', * from e;
system stop view e;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ]
do
sleep 0.5
done
# Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure
# we wait for a slow refresh, not a previous fast one.)
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
insert into src select * from numbers(1000) settings max_block_size=1;
system start view e;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Running' ]
do
sleep 0.5
done
# Rename.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
rename table e to f;
select '<24: rename during refresh>', * from f;
select '<25: rename during refresh>', view, status from refreshes where view = 'f';
alter table f modify refresh after 10 year;"

# Cancel.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
system cancel view f;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ]
do
sleep 0.5
done
# Check that another refresh doesn't immediately start after the cancelled one.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f';
system refresh view f;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ]
do
sleep 0.5
done
# Drop.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
drop table f;
select '<28: drop during refresh>', view, status from refreshes;
select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()"

# Try OFFSET and RANDOMIZE FOR.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x;
show create g;
system test view g set fake time '2050-02-03 15:30:13';"
-while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
with '2050-02-10 04:00:00'::DateTime as expected
select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;"

# Send data 'TO' an existing table.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
drop table g;
create table dest (x Int64) engine MergeTree order by x;
truncate src;
insert into src values (1);
create materialized view h refresh every 1 second to dest empty as select x*10 as x from src;
show create h;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<30: to existing table>', * from dest;
insert into src values (2);"
-while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select count() from dest -- $LINENO" | xargs`" != '2' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<31: to existing table>', * from dest;
drop table dest;
drop table h;"

# Retries.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;"
-$CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
+$CLICKHOUSE_CLIENT -q "
select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes;
create table src2 (x Int8) engine Memory;
insert into src2 values (1);
exchange tables src and src2;
drop table src2;"
-while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<31.6: did retry>', x from h2;
drop table h2"

# EMPTY
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2);
create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);"
-while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ]
+while [ "`$CLICKHOUSE_CLIENT -q "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ]
do
sleep 0.5
done
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view;
drop table i;
drop table j;"

# APPEND
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src;
select '<33: append>', * from k;
system refresh view k;

@@ -177,7 +177,7 @@ $CLICKHOUSE_CLIENT -nq "
system wait view k;
select '<35: append>', * from k order by x;"
# ALTER to non-APPEND
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
alter table k modify refresh every 10 year;
system wait view k;
system refresh view k;

@@ -187,7 +187,7 @@ $CLICKHOUSE_CLIENT -nq "
truncate table src;"

# APPEND + TO + regular materialized view reading from it.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create table mid (x Int64) engine MergeTree order by x;
create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src;
create materialized view m (x Int64) engine Memory as select x*10 as x from mid;

@@ -204,19 +204,19 @@ $CLICKHOUSE_CLIENT -nq "
drop table mid;"

# Failing to create inner table.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2);
drop table n;"

# Reading from table that doesn't exist yet.
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE }
create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE }
create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE }
create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1;
drop table o;"

-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
drop table refreshes;"
|
@@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
 function test1_insert()
 {
 echo "test1 insert"
-$CH_CLIENT -nmq "insert into test select number, NULL from numbers(3);
+$CH_CLIENT -mq "insert into test select number, NULL from numbers(3);
 insert into test select number + 3, number from numbers(3);
 insert into test select number + 6, ('str_' || toString(number))::Variant(String) from numbers(3);
 insert into test select number + 9, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(3);
@@ -21,7 +21,7 @@ insert into test select number + 15, range(number + 1)::Array(UInt64) from numbe
 function test1_select()
 {
 echo "test1 select"
-$CH_CLIENT -nmq "select v from test order by id;
+$CH_CLIENT -mq "select v from test order by id;
 select v.String from test order by id;
 select v.UInt64 from test order by id;
 select v.\`LowCardinality(String)\` from test order by id;
@@ -36,7 +36,7 @@ select v.\`Array(UInt64)\`.size0 from test order by id;"
 function test2_insert()
 {
 echo "test2 insert"
-$CH_CLIENT -nmq "insert into test select number, NULL from numbers(3);
+$CH_CLIENT -mq "insert into test select number, NULL from numbers(3);
 insert into test select number + 3, number % 2 ? NULL : number from numbers(3);
 insert into test select number + 6, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(3);
 insert into test select number + 9, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(('lc_str_' || toString(number))::LowCardinality(String), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') from numbers(3);
@@ -47,7 +47,7 @@ insert into test select number + 15, number % 2 ? CAST(NULL, 'Variant(String, UI
 function test2_select()
 {
 echo "test2 select"
-$CH_CLIENT -nmq "select v from test order by id;
+$CH_CLIENT -mq "select v from test order by id;
 select v.String from test order by id;
 select v.UInt64 from test order by id;
 select v.\`LowCardinality(String)\` from test order by id;
@@ -68,7 +68,7 @@ function test3_insert()
 function test3_select()
 {
 echo "test3 select"
-$CH_CLIENT -nmq "select v from test order by id;
+$CH_CLIENT -mq "select v from test order by id;
 select v.String from test order by id;
 select v.UInt64 from test order by id;
 select v.\`LowCardinality(String)\` from test order by id;

@@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
 function test4_insert()
 {
 echo "test4 insert"
-$CH_CLIENT -nmq "insert into test select number, NULL from numbers(100000);
+$CH_CLIENT -mq "insert into test select number, NULL from numbers(100000);
 insert into test select number + 100000, number from numbers(100000);
 insert into test select number + 200000, ('str_' || toString(number))::Variant(String) from numbers(100000);
 insert into test select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000);
@@ -21,7 +21,7 @@ insert into test select number + 500000, range(number % 20 + 1)::Array(UInt64) f
 function test4_select
 {
 echo "test4 select"
-$CH_CLIENT -nmq "select v from test format Null;
+$CH_CLIENT -mq "select v from test format Null;
 select count() from test where isNotNull(v);
 select v.String from test format Null;
 select count() from test where isNotNull(v.String);

@@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
 function test5_insert()
 {
 echo "test5 insert"
-$CH_CLIENT -nmq "
+$CH_CLIENT -mq "
 insert into test select number, NULL from numbers(200000);
 insert into test select number + 200000, number % 2 ? NULL : number from numbers(200000);
 insert into test select number + 400000, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(200000);
@@ -22,7 +22,7 @@ insert into test select number + 1000000, number % 2 ? CAST(NULL, 'Variant(Strin
 function test5_select()
 {
 echo "test5 select"
-$CH_CLIENT -nmq "
+$CH_CLIENT -mq "
 select v from test format Null;
 select count() from test where isNotNull(v);
 select v.String from test format Null;

@@ -17,7 +17,7 @@ function test6_insert()
 function test6_select()
 {
 echo "test6 select"
-$CH_CLIENT -nmq "select v from test format Null;
+$CH_CLIENT -mq "select v from test format Null;
 select count() from test where isNotNull(v);
 select v.String from test format Null;
 select count() from test where isNotNull(v.String);

@@ -10,7 +10,7 @@ disk_name="s3_cache_02944"
 $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name';
 INSERT INTO test SELECT randomString(100);
@@ -33,7 +33,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -47,7 +47,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -63,7 +63,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -77,7 +77,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"

@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_LOCAL -nm -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom;
+$CLICKHOUSE_LOCAL -m -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom;
 INSERT INTO test_table SELECT * FROM file('$CURDIR/data_parquet/02960_polygon_bound_bug.parquet', Parquet);
 CREATE DICTIONARY test_dict (geom MultiPolygon) PRIMARY KEY geom SOURCE (CLICKHOUSE(TABLE 'test_table')) LIFETIME(MIN 0 MAX 0) LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1));
 SELECT dictHas(test_dict,(174.84729269276494,-36.99524960275426));"

@@ -24,7 +24,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='error';
 SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must be unique across the policy'

@@ -40,7 +40,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='error';
 SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must cover the range from 1 to N (lowest priority specified) without gaps'

@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

 disk_name="02963_remote_read_bug"

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 DROP TABLE IF EXISTS test;

 CREATE TABLE test (a Int32, s String)
@@ -22,7 +22,7 @@ OPTIMIZE TABLE test FINAL;

 query_id=$(random_str 10)

-$CLICKHOUSE_CLIENT -nm --query_id "$query_id" --query "
+$CLICKHOUSE_CLIENT -m --query_id "$query_id" --query "
 WITH RANDOM_SET AS (
 SELECT rand32() % 10000 FROM numbers(100)
 )
@@ -37,7 +37,7 @@ SETTINGS
 merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem = 1, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 1;
 "

-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 SYSTEM FLUSH LOGS;

 -- This threshold was determined experimentally - before the fix this ratio had values around 50K

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh


-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh


-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh


-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;

@@ -24,12 +24,12 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array(

 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE', auto, 'a UInt64, b String, c Array(UInt64), d Tuple(a UInt64, b String)')"

-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE');
 desc file('$DATA_FILE');
 "

-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE', JSONEachRow);
 desc file('$DATA_FILE');
 "
@@ -39,7 +39,7 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array(
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}')"
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}') settings schema_inference_mode='union'" 2>&1 | grep -c "CANNOT_DETECT_FORMAT"

-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE.2');
 desc file('$DATA_FILE.{1,2}');
 "

@@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n
 CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database}
 CLICKHOUSE_DATABASE="$new_database"

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data;
 create table data (key Int) engine=MergeTree() order by key;
 insert into data values (1);
@@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 # suppress output
 $CLICKHOUSE_CLIENT -q "backup table data to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table data;
 attach table data (key Int) engine=MergeTree() order by key
 settings

@@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n
 CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database}
 CLICKHOUSE_DATABASE="$new_database"

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data_read;
 drop table if exists data_write;

@@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 # suppress output
 $CLICKHOUSE_CLIENT -q "backup table data_read to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table data_read;
 attach table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key
 settings
@@ -57,7 +57,7 @@ echo "Files before DETACH TABLE"
 # sed to match any part, since in case of fault injection part name may not be all_0_0_0 but all_1_1_0
 clickhouse-disks -C "$config" --disk s3_plain_disk --query "list --recursive $path" | tail -n+2 | sed 's/all_[^_]*_[^_]*_0/all_X_X_X/g'

-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 detach table data_read;
 detach table data_write;
 "

@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"

-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \
@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"

-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \

Some files were not shown because too many files have changed in this diff.
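
Every change in this diff is the same mechanical edit: the deprecated -n (--multiquery) flag is dropped from clickhouse-client and clickhouse-local invocations, so -nq becomes -q, -nm becomes -m, and -nmq becomes -mq. A minimal before/after sketch (the table name t is illustrative rather than taken from these tests, and it assumes a client version in which a --query string may contain several statements without --multiquery):

# Old spelling: -n (--multiquery) was needed for multi-statement --query strings.
$CLICKHOUSE_CLIENT -nq "create table t (x Int64) engine Memory; insert into t values (1); select x from t; drop table t;"

# New spelling: multiquery is the default, so only -q (and -m, --multiline, where used) remains.
$CLICKHOUSE_CLIENT -q "create table t (x Int64) engine Memory; insert into t values (1); select x from t; drop table t;"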