Compare commits

...

6 Commits

Author SHA1 Message Date
alesapin
96bba07d9c
Merge pull request #72346 from ClickHouse/add_final
Add some random final modifier to some classes
2024-11-23 22:48:34 +00:00
alesapin
ce82c19c2c
Add some random final modifier to some classes
2024-11-23 18:38:10 +01:00
Raúl Marín
de13b819f0
Merge pull request #72319 from ClickHouse/revert-71774-enable_http_compression_default
Revert "Set enable_http_compression default value to 1"
2024-11-23 15:59:24 +00:00
Antonio Andelic
558f639f2a
Merge pull request #72283 from ClickHouse/make-terminal-beautiful-again
Apply colors correctly to terminal output
2024-11-23 15:27:04 +00:00
Raúl Marín
52391a8271
Revert "Set enable_http_compression default value to 1" 2024-11-22 22:51:28 +01:00
Antonio Andelic
844513b1d0
correctly cast write buffer
2024-11-22 16:16:22 +01:00
9 changed files with 47 additions and 12 deletions

View File: docs/ru/operations/settings/settings.md

@@ -136,7 +136,7 @@ ClickHouse применяет настройку в тех случаях, ко
- 0 — выключена.
- 1 — включена.
-Значение по умолчанию: 1.
+Значение по умолчанию: 0.
## http_zlib_compression_level {#settings-http_zlib_compression_level}

View File: docs/zh/operations/settings/settings.md

@@ -97,7 +97,7 @@ ClickHouse从表的过时副本中选择最相关的副本。
- 0 — Disabled.
- 1 — Enabled.
-默认值:1
+默认值:0
## http_zlib_compression_level {#settings-http_zlib_compression_level}

View File: src/Core/Settings.cpp

@@ -1800,7 +1800,7 @@ Possible values:
- 0 — Disabled.
- 1 — Enabled.
-)", 1) \
+)", 0) \
DECLARE(Int64, http_zlib_compression_level, 3, R"(
Sets the level of data compression in the response to an HTTP request if [enable_http_compression = 1](#enable_http_compression).

View File: src/Core/SettingsChangesHistory.cpp

@@ -64,7 +64,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
},
{"24.11",
{
{"enable_http_compression", false, true, "Improvement for read-only clients since they can't change settings"},
{"validate_mutation_query", false, true, "New setting to validate mutation queries by default."},
{"enable_job_stack_trace", false, true, "Enable by default collecting stack traces from job's scheduling."},
{"allow_suspicious_types_in_group_by", true, false, "Don't allow Variant/Dynamic types in GROUP BY by default"},

View File: src/IO/WriteHelpers.h

@@ -1451,7 +1451,7 @@ String fourSpaceIndent(size_t indent);
bool inline isWritingToTerminal(const WriteBuffer & buf)
{
-const auto * write_buffer_to_descriptor = typeid_cast<const WriteBufferFromFileDescriptor *>(&buf);
+const auto * write_buffer_to_descriptor = dynamic_cast<const WriteBufferFromFileDescriptor *>(&buf);
return write_buffer_to_descriptor && write_buffer_to_descriptor->getFD() == STDOUT_FILENO && isatty(STDOUT_FILENO);
}
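This one-line change is the substance of #72283. ClickHouse's typeid_cast succeeds only on an exact dynamic-type match, and in practice the buffer the client writes to stdout through is a subclass of WriteBufferFromFileDescriptor, so the exact-type check returned nullptr and the terminal was never detected; dynamic_cast also accepts derived classes. A standalone sketch of the difference, with typeid_cast re-implemented in simplified form and a made-up subclass name:

```cpp
#include <cassert>
#include <typeinfo>
#include <unistd.h>

struct WriteBuffer { virtual ~WriteBuffer() = default; };

struct WriteBufferFromFileDescriptor : WriteBuffer
{
    explicit WriteBufferFromFileDescriptor(int fd_) : fd(fd_) {}
    int getFD() const { return fd; }
    int fd;
};

/// Hypothetical subclass standing in for whatever wrapper the client
/// actually uses for stdout.
struct StdoutWriteBuffer : WriteBufferFromFileDescriptor
{
    StdoutWriteBuffer() : WriteBufferFromFileDescriptor(STDOUT_FILENO) {}
};

/// Simplified re-implementation of ClickHouse's typeid_cast:
/// succeeds only when the dynamic type matches exactly.
template <typename To, typename From>
const To * typeid_cast_sketch(const From * from)
{
    return typeid(*from) == typeid(To) ? static_cast<const To *>(from) : nullptr;
}

int main()
{
    StdoutWriteBuffer buf;
    const WriteBuffer & generic = buf;

    /// Exact-type match fails for the subclass, so the old code concluded
    /// "not a terminal" and skipped colors.
    assert(typeid_cast_sketch<WriteBufferFromFileDescriptor>(&generic) == nullptr);

    /// dynamic_cast walks the inheritance chain, so the new check works.
    assert(dynamic_cast<const WriteBufferFromFileDescriptor *>(&generic) != nullptr);
}
```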

View File: src/Processors/Transforms/AggregatingTransform.cpp

@@ -51,7 +51,7 @@ namespace
}
/// Reads chunks from file in native format. Provide chunks with aggregation info.
-class SourceFromNativeStream : public ISource
+class SourceFromNativeStream final : public ISource
{
public:
explicit SourceFromNativeStream(const Block & header, TemporaryBlockStreamReaderHolder tmp_stream_)
@@ -84,7 +84,7 @@ namespace
/// Worker which merges buckets for two-level aggregation.
/// Atomically increments bucket counter and returns merged result.
-class ConvertingAggregatedToChunksWithMergingSource : public ISource
+class ConvertingAggregatedToChunksWithMergingSource final : public ISource
{
public:
static constexpr UInt32 NUM_BUCKETS = 256;
@@ -143,7 +143,7 @@ private:
};
/// Asks Aggregator to convert accumulated aggregation state into blocks (without merging) and pushes them to later steps.
-class ConvertingAggregatedToChunksSource : public ISource
+class ConvertingAggregatedToChunksSource final : public ISource
{
public:
ConvertingAggregatedToChunksSource(AggregatingTransformParamsPtr params_, AggregatedDataVariantsPtr variant_)
@@ -188,7 +188,7 @@ private:
};
/// Reads chunks from GroupingAggregatedTransform (stored in ChunksToMerge structure) and outputs them.
-class FlattenChunksToMergeTransform : public IProcessor
+class FlattenChunksToMergeTransform final : public IProcessor
{
public:
explicit FlattenChunksToMergeTransform(const Block & input_header, const Block & output_header)
@@ -272,7 +272,7 @@ private:
/// ConvertingAggregatedToChunksWithMergingSource ->
///
/// Result chunks guaranteed to be sorted by bucket number.
-class ConvertingAggregatedToChunksTransform : public IProcessor
+class ConvertingAggregatedToChunksTransform final : public IProcessor
{
public:
ConvertingAggregatedToChunksTransform(AggregatingTransformParamsPtr params_, ManyAggregatedDataVariantsPtr data_, size_t num_threads_)

View File: src/Processors/Transforms/AggregatingTransform.h

@@ -22,7 +22,7 @@ namespace CurrentMetrics
namespace DB
{
-class AggregatedChunkInfo : public ChunkInfoCloneable<AggregatedChunkInfo>
+class AggregatedChunkInfo final : public ChunkInfoCloneable<AggregatedChunkInfo>
{
public:
bool is_overflows = false;
@@ -149,7 +149,7 @@ using ManyAggregatedDataPtr = std::shared_ptr<ManyAggregatedData>;
* At aggregation step, every transform uses it's own AggregatedDataVariants structure.
* At merging step, all structures pass to ConvertingAggregatedToChunksTransform.
*/
-class AggregatingTransform : public IProcessor
+class AggregatingTransform final : public IProcessor
{
public:
AggregatingTransform(Block header, AggregatingTransformParamsPtr params_);
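A note on what the add_final branch buys (this hunk and the AggregatingTransform.cpp hunks above): `final` on a concrete class both forbids further derivation and lets the compiler devirtualize calls where the static type is the final class, turning virtual dispatch into direct, inlinable calls. A minimal sketch with hypothetical names:

```cpp
struct IProcessor
{
    virtual ~IProcessor() = default;
    virtual void work() = 0;
};

/// With `final`, the compiler knows no subclass can override work().
struct ExampleTransform final : IProcessor
{
    void work() override {}
};

void drive(ExampleTransform & t)
{
    /// The dynamic type must be exactly ExampleTransform, so this virtual
    /// call can be devirtualized (and inlined) by the optimizer.
    t.work();
}

int main()
{
    ExampleTransform t;
    drive(t);
}
```

For processor classes that were never designed to be subclassed, this is a free correctness guard plus an optimization hint.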

View File

@@ -0,0 +1,36 @@
#!/usr/bin/expect -f
set basedir [file dirname $argv0]
set basename [file tail $argv0]
if {[info exists env(CLICKHOUSE_TMP)]} {
set CLICKHOUSE_TMP $env(CLICKHOUSE_TMP)
} else {
set CLICKHOUSE_TMP "."
}
exp_internal -f $CLICKHOUSE_TMP/$basename.debuglog 0
set history_file $CLICKHOUSE_TMP/$basename.history
log_user 0
set timeout 60
match_max 100000
expect_after {
# Do not ignore eof from expect
-i $any_spawn_id eof { exp_continue }
# A default timeout action is to do nothing, change it to fail
-i $any_spawn_id timeout { exit 1 }
}
# useful debugging configuration
# exp_internal 1
spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion --enable-progress-table-toggle=0 --highlight 0 --history_file=$history_file"
expect ":) "
# Make a query
send -- "SELECT 1 as Hello\r"
expect -re "\\\[1mHello.*\\\[90m1\\\."
expect ":) "
send -- "exit\r"
expect eof
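What the new test checks: expect spawns clickhouse-client through a pseudo-terminal, so isatty(STDOUT_FILENO) is true inside the client; it then runs `SELECT 1 as Hello` and requires the output to contain ANSI escape sequences, `[1m` (bold) around the column name and `[90m` (bright black) near the value, i.e., exactly the coloring the dynamic_cast fix re-enables. A hedged C++ sketch of the behavior under test; the actual formatting of clickhouse-client's output is richer than this:

```cpp
#include <cstdio>
#include <unistd.h>

/// Emit ANSI color codes only when stdout is really a terminal.
/// "\x1b[1m" = bold, "\x1b[90m" = bright black, "\x1b[0m" = reset.
int main()
{
    if (isatty(STDOUT_FILENO))
        std::printf("\x1b[1mHello\x1b[0m\n\x1b[90m1\x1b[0m\n");
    else
        std::printf("Hello\n1\n");
}
```

Because expect allocates a PTY, the colored branch is exercised even on a CI machine with no terminal attached; running the sketch with output redirected to a file takes the plain branch instead.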