Merge branch 'master' into vdimir/join_select_inner_table

vdimir 2024-10-02 15:55:27 +00:00
commit 7777617b76
No known key found for this signature in database
GPG Key ID: 6EE4CE2BEDC51862
147 changed files with 1235 additions and 1150 deletions

View File

@ -7,14 +7,24 @@ import subprocess
import sys
def build_docker_deps(image_name, imagedir):
cmd = f"""docker run --entrypoint "/bin/bash" {image_name} -c "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze --warn silence | sed 's/ \+//g' | sort | uniq" > {imagedir}/requirements.txt"""
def build_docker_deps(image_name: str, imagedir: str) -> None:
print("Fetch the newest manifest for", image_name)
pip_cmd = (
"pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze "
"--warn silence --exclude pipdeptree"
)
# /==/!d - keep only pinned dependencies (drop lines without a version pin)
# /==.*+ubuntu/d - ignore Ubuntu system packages
# s/\s//g - remove whitespace
sed = r"sed '/==/!d; /==.*+ubuntu/d; s/\s//g'"
cmd = rf"""docker run --rm --entrypoint "/bin/bash" {image_name} -c "{pip_cmd} | {sed} | sort -u" > {imagedir}/requirements.txt"""
print("Running the command:", cmd)
subprocess.check_call(cmd, shell=True)
def check_docker_file_install_with_pip(filepath):
image_name = None
with open(filepath, "r") as f:
with open(filepath, "r", encoding="utf-8") as f:
for line in f:
if "docker build" in line:
arr = line.split(" ")
@ -25,7 +35,7 @@ def check_docker_file_install_with_pip(filepath):
return image_name, False
def process_affected_images(images_dir):
def process_affected_images(images_dir: str) -> None:
for root, _dirs, files in os.walk(images_dir):
for f in files:
if f == "Dockerfile":

View File

@ -48,7 +48,7 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
&& add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
&& apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
docker-ce='5:23.*' \
docker-ce='5:23.*' docker-compose-plugin='2.29.*' \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \

View File

@ -1,15 +1,13 @@
PyHDFS==0.3.1
PyJWT==2.3.0
PyMySQL==1.1.0
PyJWT==2.4.0
PyMySQL==1.1.1
PyNaCl==1.5.0
PyYAML==5.3.1
SecretStorage==3.3.1
argon2-cffi-bindings==21.2.0
argon2-cffi==23.1.0
async-timeout==4.0.3
asyncio==3.4.3
attrs==23.2.0
avro==1.10.2
avro==1.11.3
azure-core==1.30.1
azure-storage-blob==12.19.0
bcrypt==4.1.3
@ -24,18 +22,13 @@ cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
confluent-kafka==2.3.0
cryptography==3.4.8
cryptography==42.0.0
dbus-python==1.2.18
decorator==5.1.1
delta-spark==2.3.0
deltalake==0.16.0
dict2xml==1.7.4
dicttoxml==1.7.16
distro-info==1.1+ubuntu0.2
distro==1.7.0
docker-compose==1.29.2
docker==6.1.3
dockerpty==0.4.1
docopt==0.6.2
exceptiongroup==1.2.1
execnet==2.1.1
geomet==0.2.1.post1
@ -49,7 +42,6 @@ iniconfig==2.0.0
isodate==0.6.1
jeepney==0.7.1
jmespath==1.0.1
jsonschema==3.2.0
jwcrypto==1.5.6
kafka-python==2.0.2
kazoo==2.9.0
@ -63,23 +55,22 @@ lz4==4.3.3
minio==7.2.3
more-itertools==8.10.0
nats-py==2.6.0
numpy==2.1.0
oauthlib==3.2.0
packaging==24.0
paramiko==3.4.0
pika==1.2.0
pip==24.1.1
pipdeptree==2.23.0
pluggy==1.5.0
protobuf==4.25.2
psycopg2-binary==2.9.6
py4j==0.10.9.5
py==1.11.0
pyarrow-hotfix==0.6
pyarrow==17.0.0
pycparser==2.22
pycryptodome==3.20.0
pymongo==3.11.0
pyparsing==2.4.7
pyrsistent==0.20.0
pyspark==3.3.2
pyspnego==0.10.2
pytest-order==1.0.0
@ -89,28 +80,22 @@ pytest-reportlog==0.4.0
pytest-timeout==2.2.0
pytest-xdist==3.5.0
pytest==7.4.4
python-apt==2.4.0+ubuntu3
python-dateutil==2.9.0.post0
python-dotenv==0.21.1
pytz==2023.3.post1
redis==5.0.1
requests-kerberos==0.14.0
requests==2.31.0
retry==0.9.2
s3transfer==0.10.1
setuptools==59.6.0
setuptools==70.0.0
simplejson==3.19.2
six==1.16.0
soupsieve==2.5
texttable==1.7.0
tomli==2.0.1
typing_extensions==4.11.0
tzlocal==2.1
unattended-upgrades==0.1
urllib3==2.0.7
wadllib==1.3.6
websocket-client==0.59.0
wheel==0.37.1
websocket-client==1.8.0
wheel==0.38.1
zipp==1.0.0
deltalake==0.16.0

View File

@ -1064,4 +1064,32 @@ Possible values:
- throw, drop, rebuild
Default value: throw
Default value: throw
## min_free_disk_bytes_to_perform_insert
The minimum number of bytes of free disk space required to perform an insert. If the number of available free bytes is less than `min_free_disk_bytes_to_perform_insert`, an exception is thrown and the insert is not executed. Note that this setting:
- takes into account the `keep_free_space_bytes` setting.
- does not take into account the amount of data that will be written by the `INSERT` operation.
- is only checked if a positive (non-zero) number of bytes is specified.
Possible values:
- Any positive integer.
Default value: 0 bytes.
## min_free_disk_ratio_to_perform_insert
The minimum ratio of free to total disk space required to perform an `INSERT`. Must be a floating-point value between 0 and 1. Note that this setting:
- takes into account the `keep_free_space_bytes` setting.
- does not take into account the amount of data that will be written by the `INSERT` operation.
- is only checked if a positive (non-zero) ratio is specified.
Possible values:
- Float, 0.0 - 1.0
Default value: 0.0
Note that if both `min_free_disk_ratio_to_perform_insert` and `min_free_disk_bytes_to_perform_insert` are specified, ClickHouse uses the value that requires the larger amount of free disk space before allowing inserts.
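
A minimal usage sketch at the session level (the threshold values are hypothetical; per this change, settings with the same names also exist at the MergeTree level):

```sql
-- Require at least 10 GiB free, or 5% of the total disk, whichever is larger
SET min_free_disk_bytes_to_perform_insert = 10737418240; -- 10 GiB
SET min_free_disk_ratio_to_perform_insert = 0.05;        -- 5% of total disk space
-- Subsequent INSERTs throw NOT_ENOUGH_SPACE if free space on the target disk falls below the larger limit
```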

View File

@ -2045,7 +2045,7 @@ Possible values:
- 0 - Disabled.
- 1 - Enabled.
Default value: `0`.
Default value: `1`.
### async_insert_busy_timeout_min_ms {#async-insert-busy-timeout-min-ms}
@ -5687,6 +5687,12 @@ Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting
Default value: `false`.
## enable_secure_identifiers
If enabled, only allow secure identifiers, which may contain only alphanumeric characters and underscores.
Default value: `false`.
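
A hedged illustration of the effect (table names are hypothetical): with the setting enabled, any identifier containing characters other than letters, digits, and underscores is rejected.

```sql
SET enable_secure_identifiers = 1;

-- Rejected: the identifier contains a dash
CREATE TABLE `bad-name` (x UInt8) ENGINE = Memory; -- throws BAD_ARGUMENTS: Not a secure identifier

-- Accepted: only alphanumeric characters and underscores
CREATE TABLE good_name (x UInt8) ENGINE = Memory;
```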
## show_create_query_identifier_quoting_rule
Define identifier quoting behavior of the show create query result:

View File

@ -19,9 +19,7 @@ avgWeighted(x, weight)
- `weight` — Weights of the values.
`x` and `weight` must both be
[Integer](../../../sql-reference/data-types/int-uint.md),
[floating-point](../../../sql-reference/data-types/float.md), or
[Decimal](../../../sql-reference/data-types/decimal.md),
[Integer](../../../sql-reference/data-types/int-uint.md) or [floating-point](../../../sql-reference/data-types/float.md),
but may have different types.
**Returned value**

View File

@ -1974,6 +1974,38 @@ Result:
Converts a date, or date with time, to a UInt16 number containing the ISO Year number.
**Syntax**
```sql
toISOYear(value)
```
**Arguments**
- `value` — A date or date with time.
**Returned value**
- `value` converted to its ISO year number. [UInt16](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toISOYear(toDate('2024/10/02')) as year1,
toISOYear(toDateTime('2024/10/02 01:30:00')) as year2
```
Result:
```response
┌─year1─┬─year2─┐
│ 2024 │ 2024 │
└───────┴───────┘
```
## toISOWeek
Converts a date, or date with time, to a UInt8 number containing the ISO Week number.
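
An illustrative query (the expected result is shown for reference and is assumed, not verified against a server):

```sql
SELECT toISOWeek(toDate('2024/10/02')) AS week
```

```response
┌─week─┐
│   40 │
└──────┘
```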

View File

@ -280,6 +280,38 @@ SELECT
Same as `toIPv4`, but if the IPv4 address has an invalid format, it returns `0.0.0.0` (0 IPv4).
**Syntax**
```sql
toIPv4OrDefault(value)
```
**Arguments**
- `value` — A string containing an IPv4 address.
**Returned value**
- `value` converted to an IPv4 address, or `0.0.0.0` if `value` has an invalid format. [String](../data-types/string.md).
**Example**
Query:
```sql
SELECT
toIPv4OrDefault('192.168.0.1') AS s1,
toIPv4OrDefault('192.168.0') AS s2
```
Result:
```response
┌─s1──────────┬─s2──────┐
│ 192.168.0.1 │ 0.0.0.0 │
└─────────────┴─────────┘
```
## toIPv4OrNull(string)
Same as `toIPv4`, but if the IPv4 address has an invalid format, it returns null.
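
An illustrative query for comparison (output shown as expected, not verified here):

```sql
SELECT
    toIPv4OrNull('192.168.0.1') AS s1,
    toIPv4OrNull('192.168.0') AS s2
```

```response
┌─s1──────────┬─s2───┐
│ 192.168.0.1 │ ᴺᵁᴸᴸ │
└─────────────┴──────┘
```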

View File

@ -207,7 +207,31 @@ If `NULL` is passed, then the function returns type `Nullable(Nothing)`, which c
**Syntax**
```sql
toTypeName(x)
toTypeName(value)
```
**Arguments**
- `value` — A value of arbitrary type.
**Returned value**
- The name of the data type of `value`. [String](../data-types/string.md).
**Example**
Query:
```sql
SELECT toTypeName(123);
```
Result:
```response
┌─toTypeName(123)─┐
│ UInt8 │
└─────────────────┘
```
## blockSize {#blockSize}
@ -500,6 +524,30 @@ Useful in table engine parameters of `CREATE TABLE` queries where you need to sp
currentDatabase()
```
**Arguments**
None.
**Returned value**
- The name of the current database. [String](../data-types/string.md).
**Example**
Query:
```sql
SELECT currentDatabase()
```
Result:
```response
┌─currentDatabase()─┐
│ default │
└───────────────────┘
```
## currentUser {#currentUser}
Returns the name of the current user. In case of a distributed query, the name of the user who initiated the query is returned.
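
An illustrative query (the returned name depends on the connected user; `default` is assumed here):

```sql
SELECT currentUser()
```

```response
┌─currentUser()─┐
│ default       │
└───────────────┘
```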

View File

@ -21,8 +21,7 @@ avgWeighted(x, weight)
The types of `x` and `weight` must both be
[Integer](../../../sql-reference/data-types/int-uint.md), or
[floating-point](../../../sql-reference/data-types/float.md), or
[Decimal](../../../sql-reference/data-types/decimal.md),
[floating-point](../../../sql-reference/data-types/float.md),
but they may differ from each other.
**Returned value**

View File

@ -447,51 +447,6 @@ public:
}
}
void readAndMerge(DB::ReadBuffer & rb)
{
UInt8 rhs_skip_degree = 0;
DB::readBinaryLittleEndian(rhs_skip_degree, rb);
if (rhs_skip_degree > skip_degree)
{
skip_degree = rhs_skip_degree;
rehash();
}
size_t rhs_size = 0;
DB::readVarUInt(rhs_size, rb);
if (rhs_size > UNIQUES_HASH_MAX_SIZE)
throw Poco::Exception("Cannot read UniquesHashSet: too large size_degree.");
if ((1ULL << size_degree) < rhs_size)
{
UInt8 new_size_degree = std::max(UNIQUES_HASH_SET_INITIAL_SIZE_DEGREE, static_cast<int>(log2(rhs_size - 1)) + 2);
resize(new_size_degree);
}
if (rhs_size <= 1)
{
for (size_t i = 0; i < rhs_size; ++i)
{
HashValue x = 0;
DB::readBinaryLittleEndian(x, rb);
insertHash(x);
}
}
else
{
auto hs = std::make_unique<HashValue[]>(rhs_size);
rb.readStrict(reinterpret_cast<char *>(hs.get()), rhs_size * sizeof(HashValue));
for (size_t i = 0; i < rhs_size; ++i)
{
DB::transformEndianness<std::endian::native, std::endian::little>(hs[i]);
insertHash(hs[i]);
}
}
}
static void skip(DB::ReadBuffer & rb)
{
size_t size = 0;

View File

@ -770,9 +770,11 @@ ColumnPtr ColumnTuple::compress() const
return ColumnCompressed::create(size(), byte_size,
[my_compressed = std::move(compressed)]() mutable
{
for (auto & column : my_compressed)
column = column->decompress();
return ColumnTuple::create(my_compressed);
Columns decompressed;
decompressed.reserve(my_compressed.size());
for (const auto & column : my_compressed)
decompressed.push_back(column->decompress());
return ColumnTuple::create(decompressed);
});
}

View File

@ -1393,9 +1393,11 @@ ColumnPtr ColumnVariant::compress() const
return ColumnCompressed::create(size(), byte_size,
[my_local_discriminators_compressed = std::move(local_discriminators_compressed), my_offsets_compressed = std::move(offsets_compressed), my_compressed = std::move(compressed), my_local_to_global_discriminators = this->local_to_global_discriminators]() mutable
{
for (auto & variant : my_compressed)
variant = variant->decompress();
return ColumnVariant::create(my_local_discriminators_compressed->decompress(), my_offsets_compressed->decompress(), my_compressed, my_local_to_global_discriminators);
Columns decompressed;
decompressed.reserve(my_compressed.size());
for (const auto & variant : my_compressed)
decompressed.push_back(variant->decompress());
return ColumnVariant::create(my_local_discriminators_compressed->decompress(), my_offsets_compressed->decompress(), decompressed, my_local_to_global_discriminators);
});
}

View File

@ -177,48 +177,6 @@ public:
}
}
void readAndMerge(DB::ReadBuffer & in)
{
auto container_type = getContainerType();
/// If readAndMerge is called with an empty state, just deserialize
/// the state specified as a parameter.
if ((container_type == details::ContainerType::SMALL) && small.empty())
{
read(in);
return;
}
UInt8 v;
readBinary(v, in);
auto rhs_container_type = static_cast<details::ContainerType>(v);
auto max_container_type = details::max(container_type, rhs_container_type);
if (container_type != max_container_type)
{
if (max_container_type == details::ContainerType::MEDIUM)
toMedium();
else if (max_container_type == details::ContainerType::LARGE)
toLarge();
}
if (rhs_container_type == details::ContainerType::SMALL)
{
typename Small::Reader reader(in);
while (reader.next())
insert(reader.get());
}
else if (rhs_container_type == details::ContainerType::MEDIUM)
{
typename Medium::Reader reader(in);
while (reader.next())
insert(reader.get());
}
else if (rhs_container_type == details::ContainerType::LARGE)
getContainer<Large>().readAndMerge(in);
}
void write(DB::WriteBuffer & out) const
{
auto container_type = getContainerType();

View File

@ -16,11 +16,4 @@ public:
if (Base::buf[i].isZero(*this) && !rhs.buf[i].isZero(*this))
new (&Base::buf[i]) Cell(rhs.buf[i]);
}
/// NOTE: Currently this method isn't used. When it does, the ReadBuffer should
/// contain the Key explicitly.
// void readAndMerge(DB::ReadBuffer & rb)
// {
// }
};

View File

@ -16,7 +16,6 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int TOO_LARGE_ARRAY_SIZE;
}
}
@ -55,26 +54,6 @@ public:
if (!rhs.buf[i].isZero(*this))
this->insert(rhs.buf[i].getValue());
}
void readAndMerge(DB::ReadBuffer & rb)
{
Cell::State::read(rb);
size_t new_size = 0;
DB::readVarUInt(new_size, rb);
if (new_size > 100'000'000'000)
throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE, "The size of serialized hash table is suspiciously large: {}", new_size);
this->resize(new_size);
for (size_t i = 0; i < new_size; ++i)
{
Cell x;
x.read(rb);
this->insert(x.getValue());
}
}
};

View File

@ -353,18 +353,6 @@ public:
}
}
void readAndMerge(DB::ReadBuffer & in)
{
typename RankStore::Reader reader(in);
while (reader.next())
{
const auto & data = reader.get();
update(data.first, data.second);
}
in.ignore(sizeof(DenominatorCalculatorType) + sizeof(ZerosCounterType));
}
static void skip(DB::ReadBuffer & in)
{
in.ignore(sizeof(RankStore) + sizeof(DenominatorCalculatorType) + sizeof(ZerosCounterType));

View File

@ -113,24 +113,6 @@ public:
small.read(in);
}
void readAndMerge(DB::ReadBuffer & in)
{
bool is_rhs_large;
readBinary(is_rhs_large, in);
if (!isLarge() && is_rhs_large)
toLarge();
if (!is_rhs_large)
{
typename Small::Reader reader(in);
while (reader.next())
insert(reader.get());
}
else
large->readAndMerge(in);
}
void write(DB::WriteBuffer & out) const
{
writeBinary(isLarge(), out);

View File

@ -58,36 +58,4 @@ void BlockInfo::read(ReadBuffer & in)
}
}
void BlockMissingValues::setBit(size_t column_idx, size_t row_idx)
{
RowsBitMask & mask = rows_mask_by_column_id[column_idx];
mask.resize(row_idx + 1);
mask[row_idx] = true;
}
void BlockMissingValues::setBits(size_t column_idx, size_t rows)
{
RowsBitMask & mask = rows_mask_by_column_id[column_idx];
mask.resize(rows);
std::fill(mask.begin(), mask.end(), true);
}
const BlockMissingValues::RowsBitMask & BlockMissingValues::getDefaultsBitmask(size_t column_idx) const
{
static RowsBitMask none;
auto it = rows_mask_by_column_id.find(column_idx);
if (it != rows_mask_by_column_id.end())
return it->second;
return none;
}
bool BlockMissingValues::hasDefaultBits(size_t column_idx) const
{
auto it = rows_mask_by_column_id.find(column_idx);
if (it == rows_mask_by_column_id.end())
return false;
const auto & col_mask = it->second;
return std::find(col_mask.begin(), col_mask.end(), true) != col_mask.end();
}
}

View File

@ -2,10 +2,6 @@
#include <base/types.h>
#include <unordered_map>
#include <vector>
namespace DB
{
@ -46,30 +42,4 @@ struct BlockInfo
void read(ReadBuffer & in);
};
/// Block extension to support delayed defaults. AddingDefaultsTransform uses it to replace missing values with column defaults.
class BlockMissingValues
{
public:
using RowsBitMask = std::vector<bool>; /// a bit per row for a column
/// Get mask for column, column_idx is index inside corresponding block
const RowsBitMask & getDefaultsBitmask(size_t column_idx) const;
/// Check that we have to replace default value at least in one of columns
bool hasDefaultBits(size_t column_idx) const;
/// Set bit for a specified row in a single column.
void setBit(size_t column_idx, size_t row_idx);
/// Set bits for all rows in a single column.
void setBits(size_t column_idx, size_t rows);
bool empty() const { return rows_mask_by_column_id.empty(); }
size_t size() const { return rows_mask_by_column_id.size(); }
void clear() { rows_mask_by_column_id.clear(); }
private:
using RowsMaskByColumnId = std::unordered_map<size_t, RowsBitMask>;
/// If rows_mask_by_column_id[column_id][row_id] is true related value in Block should be replaced with column default.
/// It could contain less columns and rows then related block.
RowsMaskByColumnId rows_mask_by_column_id;
};
}

View File

@ -0,0 +1,53 @@
#include <Core/BlockMissingValues.h>
namespace DB
{
void BlockMissingValues::setBit(size_t column_idx, size_t row_idx)
{
RowsBitMask & mask = rows_mask_by_column_id[column_idx];
mask.resize(row_idx + 1);
mask.set(row_idx, true);
}
void BlockMissingValues::setBits(size_t column_idx, size_t rows)
{
auto & mask = rows_mask_by_column_id[column_idx];
mask.set(0, std::min(mask.size(), rows), true);
mask.resize(rows, true);
}
const BlockMissingValues::RowsBitMask & BlockMissingValues::getDefaultsBitmask(size_t column_idx) const
{
return rows_mask_by_column_id[column_idx];
}
bool BlockMissingValues::hasDefaultBits(size_t column_idx) const
{
/// This is correct because we resize the bitmask only when setting a bit.
return !rows_mask_by_column_id[column_idx].empty();
}
void BlockMissingValues::clear()
{
for (auto & mask : rows_mask_by_column_id)
mask.clear();
}
bool BlockMissingValues::empty() const
{
return std::ranges::all_of(rows_mask_by_column_id, [&](const auto & mask)
{
return mask.empty();
});
}
size_t BlockMissingValues::size() const
{
size_t res = 0;
for (const auto & mask : rows_mask_by_column_id)
res += !mask.empty();
return res;
}
}

View File

@ -0,0 +1,44 @@
#pragma once
#include <vector>
#include <boost/dynamic_bitset.hpp>
#include <Processors/Chunk.h>
namespace DB
{
/// Block extension to support delayed defaults.
/// AddingDefaultsTransform uses it to replace missing values with column defaults.
class BlockMissingValues
{
public:
using RowsBitMask = boost::dynamic_bitset<>; /// a bit per row for a column
explicit BlockMissingValues(size_t num_columns) : rows_mask_by_column_id(num_columns) {}
/// Get mask for column, column_idx is index inside corresponding block
const RowsBitMask & getDefaultsBitmask(size_t column_idx) const;
/// Check that we have to replace default value at least in one of columns
bool hasDefaultBits(size_t column_idx) const;
/// Set bit for a specified row in a single column.
void setBit(size_t column_idx, size_t row_idx);
/// Set bits for all rows in a single column.
void setBits(size_t column_idx, size_t rows);
void clear();
bool empty() const;
size_t size() const;
private:
using RowsMaskByColumnId = std::vector<RowsBitMask>;
/// If rows_mask_by_column_id[column_id][row_id] is true, the related value in the Block should be replaced with the column default.
/// It could contain fewer rows than the related block.
RowsMaskByColumnId rows_mask_by_column_id;
};
/// The same as above but can be used as a chunk info.
class ChunkMissingValues : public BlockMissingValues, public ChunkInfoCloneable<ChunkMissingValues>
{
};
}

View File

@ -360,6 +360,8 @@ namespace ErrorCodes
M(Int64, distributed_ddl_task_timeout, 180, "Timeout for DDL query responses from all hosts in the cluster. If a ddl request has not been performed on all hosts, a response will contain a timeout error and a request will be executed in an async mode. A negative value means infinite. Zero means async mode.", 0) \
M(Milliseconds, stream_flush_interval_ms, 7500, "Timeout for flushing data from streaming storages.", 0) \
M(Milliseconds, stream_poll_timeout_ms, 500, "Timeout for polling data from/to streaming storages.", 0) \
M(UInt64, min_free_disk_bytes_to_perform_insert, 0, "Minimum free disk space bytes to perform an insert.", 0) \
M(Double, min_free_disk_ratio_to_perform_insert, 0.0, "Minimum free disk space ratio to perform an insert.", 0) \
\
M(Bool, final, false, "Query with the FINAL modifier by default. If the engine does not support the FINAL, it does not have any effect. On queries with multiple tables, FINAL is applied only to those that support it. It also works on distributed tables", 0) \
\
@ -919,9 +921,9 @@ namespace ErrorCodes
M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \
M(Bool, restore_replace_external_dictionary_source_to_null, false, "Replace external dictionary sources to Null on restore. Useful for testing purposes", 0) \
M(Bool, create_if_not_exists, false, "Enable IF NOT EXISTS for CREATE statements by default", 0) \
M(Bool, enable_secure_identifiers, false, "If enabled, only allow secure identifiers which contain only underscore and alphanumeric characters", 0) \
M(Bool, mongodb_throw_on_unsupported_query, true, "If enabled, MongoDB tables will return an error when a MongoDB query cannot be built. Otherwise, ClickHouse reads the full table and processes it locally. This option does not apply to the legacy implementation or when 'allow_experimental_analyzer=0'.", 0) \
\
\
/* ###################################### */ \
/* ######## EXPERIMENTAL FEATURES ####### */ \
/* ###################################### */ \

View File

@ -75,6 +75,9 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"show_create_query_identifier_quoting_rule", "when_necessary", "when_necessary", "New setting."},
{"show_create_query_identifier_quoting_style", "Backticks", "Backticks", "New setting."},
{"query_plan_join_inner_table_selection", "auto", "auto", "New setting."},
{"enable_secure_identifiers", false, false, "New setting."},
{"min_free_disk_bytes_to_perform_insert", 0, 0, "New setting."},
{"min_free_disk_ratio_to_perform_insert", 0.0, 0.0, "New setting."},
}
},
{"24.9",
@ -94,6 +97,8 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"allow_experimental_join_right_table_sorting", false, false, "If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join"},
{"mongodb_throw_on_unsupported_query", false, true, "New setting."},
{"allow_experimental_join_right_table_sorting", false, false, "If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join"}
{"min_free_disk_bytes_to_perform_insert", 0, 0, "Maintain some free disk space bytes from inserts while still allowing for temporary writing."},
{"min_free_disk_ratio_to_perform_insert", 0.0, 0.0, "Maintain some free disk space bytes expressed as ratio to total disk space from inserts while still allowing for temporary writing."},
}
},
{"24.8",

View File

@ -24,7 +24,8 @@
#if USE_SIMDJSON
# include <Common/JSONParsers/SimdJSONParser.h>
#elif USE_RAPIDJSON
#endif
#if USE_RAPIDJSON
# include <Common/JSONParsers/RapidJSONParser.h>
#else
# include <Common/JSONParsers/DummyJSONParser.h>
@ -36,6 +37,7 @@ namespace Setting
{
extern const SettingsBool allow_experimental_object_type;
extern const SettingsBool use_json_alias_for_old_object_type;
extern const SettingsBool allow_simdjson;
}
namespace ErrorCodes
@ -127,12 +129,18 @@ SerializationPtr DataTypeObject::doGetDefaultSerialization() const
{
case SchemaFormat::JSON:
#if USE_SIMDJSON
return std::make_shared<SerializationJSON<SimdJSONParser>>(
std::move(typed_path_serializations),
paths_to_skip,
path_regexps_to_skip,
buildJSONExtractTree<SimdJSONParser>(getPtr(), "JSON serialization"));
#elif USE_RAPIDJSON
auto context = CurrentThread::getQueryContext();
if (!context)
context = Context::getGlobalContextInstance();
if (context->getSettingsRef()[Setting::allow_simdjson])
return std::make_shared<SerializationJSON<SimdJSONParser>>(
std::move(typed_path_serializations),
paths_to_skip,
path_regexps_to_skip,
buildJSONExtractTree<SimdJSONParser>(getPtr(), "JSON serialization"));
#endif
#if USE_RAPIDJSON
return std::make_shared<SerializationJSON<RapidJSONParser>>(
std::move(typed_path_serializations),
paths_to_skip,
@ -404,7 +412,7 @@ std::unique_ptr<ISerialization::SubstreamData> DataTypeObject::getDynamicSubcolu
else
{
res = std::make_unique<SubstreamData>(std::make_shared<SerializationDynamic>());
res->type = std::make_shared<DataTypeDynamic>();
res->type = std::make_shared<DataTypeDynamic>(max_dynamic_types);
}
/// If column was provided, we should create a column for requested subcolumn.

View File

@ -4,6 +4,7 @@
#include <Formats/MarkInCompressedFile.h>
#include <Common/PODArray.h>
#include <Core/Block.h>
#include <Core/BlockMissingValues.h>
namespace DB
{

View File

@ -1,6 +1,7 @@
#pragma once
#include <Core/Block.h>
#include <Core/BlockMissingValues.h>
namespace DB
{

View File

@ -1,5 +1,6 @@
#pragma once
#include <base/arithmeticOverflow.h>
#include <base/types.h>
#include <Core/DecimalFunctions.h>
#include <Common/Exception.h>
@ -178,7 +179,7 @@ struct ToStartOfDayImpl
}
static Int64 executeExtendedResult(Int32 d, const DateLUTImpl & time_zone)
{
return time_zone.fromDayNum(ExtendedDayNum(d)) * DecimalUtils::scaleMultiplier<DateTime64>(DataTypeDateTime64::default_scale);
return common::mulIgnoreOverflow(time_zone.fromDayNum(ExtendedDayNum(d)), DecimalUtils::scaleMultiplier<DateTime64>(DataTypeDateTime64::default_scale));
}
using FactorTransform = ZeroTransform;
@ -1980,22 +1981,19 @@ struct ToRelativeSubsecondNumImpl
return t.value;
if (scale > scale_multiplier)
return t.value / (scale / scale_multiplier);
return static_cast<UInt128>(t.value) * static_cast<UInt128>((scale_multiplier / scale));
/// Casting ^^: All integers are Int64, yet if t.value is big enough the multiplication can still
/// overflow which is UB. This place is too low-level and generic to check if t.value is sane.
/// Therefore just let it overflow safely and don't bother further.
return common::mulIgnoreOverflow(t.value, scale_multiplier / scale);
}
static Int64 execute(UInt32 t, const DateLUTImpl &)
{
return t * scale_multiplier;
return common::mulIgnoreOverflow(static_cast<Int64>(t), scale_multiplier);
}
static Int64 execute(Int32 d, const DateLUTImpl & time_zone)
{
return static_cast<Int64>(time_zone.fromDayNum(ExtendedDayNum(d))) * scale_multiplier;
return common::mulIgnoreOverflow(static_cast<Int64>(time_zone.fromDayNum(ExtendedDayNum(d))), scale_multiplier);
}
static Int64 execute(UInt16 d, const DateLUTImpl & time_zone)
{
return static_cast<Int64>(time_zone.fromDayNum(DayNum(d)) * scale_multiplier);
return common::mulIgnoreOverflow(static_cast<Int64>(time_zone.fromDayNum(DayNum(d))), scale_multiplier);
}
using FactorTransform = ZeroTransform;

View File

@ -513,12 +513,6 @@ static void validateUpdateColumns(
throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "There is no column {} in table", backQuote(column_name));
}
}
else if (storage_columns.getColumn(GetColumnsOptions::Ordinary, column_name).type->hasDynamicSubcolumns())
{
throw Exception(ErrorCodes::CANNOT_UPDATE_COLUMN,
"Cannot update column {} with type {}: updates of columns with dynamic subcolumns are not supported",
backQuote(column_name), storage_columns.getColumn(GetColumnsOptions::Ordinary, column_name).type->getName());
}
}
}
@ -1365,6 +1359,21 @@ void MutationsInterpreter::validate()
}
}
const auto & storage_columns = source.getStorageSnapshot(metadata_snapshot, context)->metadata->getColumns();
for (const auto & command : commands)
{
for (const auto & [column_name, _] : command.column_to_update_expression)
{
auto column = storage_columns.tryGetColumn(GetColumnsOptions::Ordinary, column_name);
if (column && column->type->hasDynamicSubcolumns())
{
throw Exception(ErrorCodes::CANNOT_UPDATE_COLUMN,
"Cannot update column {} with type {}: updates of columns with dynamic subcolumns are not supported",
backQuote(column_name), storage_columns.getColumn(GetColumnsOptions::Ordinary, column_name).type->getName());
}
}
}
QueryPlan plan;
initQueryPlan(stages.front(), plan);

View File

@ -154,6 +154,7 @@ namespace Setting
extern const SettingsBool use_query_cache;
extern const SettingsBool wait_for_async_insert;
extern const SettingsSeconds wait_for_async_insert_timeout;
extern const SettingsBool enable_secure_identifiers;
}
namespace ErrorCodes
@ -997,6 +998,14 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
InterpreterSetQuery::applySettingsFromQuery(ast, context);
validateAnalyzerSettings(ast, settings[Setting::allow_experimental_analyzer]);
if (settings[Setting::enable_secure_identifiers])
{
WriteBufferFromOwnString buf;
IAST::FormatSettings enable_secure_identifiers_settings(buf, true);
enable_secure_identifiers_settings.enable_secure_identifiers = true;
ast->format(enable_secure_identifiers_settings);
}
if (auto * insert_query = ast->as<ASTInsertQuery>())
insert_query->tail = istr;

View File

@ -216,7 +216,8 @@ bool ASTAuthenticationData::hasSecretParts() const
auto auth_type = *type;
if ((auth_type == AuthenticationType::PLAINTEXT_PASSWORD)
|| (auth_type == AuthenticationType::SHA256_PASSWORD)
|| (auth_type == AuthenticationType::DOUBLE_SHA1_PASSWORD))
|| (auth_type == AuthenticationType::DOUBLE_SHA1_PASSWORD)
|| (auth_type == AuthenticationType::BCRYPT_PASSWORD))
return true;
return childrenHaveSecretParts();

View File

@ -8,6 +8,7 @@
#include <Poco/String.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/SipHash.h>
#include <algorithm>
namespace DB
{
@ -17,6 +18,7 @@ namespace ErrorCodes
extern const int TOO_BIG_AST;
extern const int TOO_DEEP_AST;
extern const int UNKNOWN_ELEMENT_IN_AST;
extern const int BAD_ARGUMENTS;
}
@ -219,6 +221,7 @@ String IAST::getColumnNameWithoutAlias() const
void IAST::FormatSettings::writeIdentifier(const String & name, bool ambiguous) const
{
checkIdentifier(name);
bool must_quote
= (identifier_quoting_rule == IdentifierQuotingRule::Always
|| (ambiguous && identifier_quoting_rule == IdentifierQuotingRule::WhenNecessary));
@ -259,6 +262,21 @@ void IAST::FormatSettings::writeIdentifier(const String & name, bool ambiguous)
}
}
void IAST::FormatSettings::checkIdentifier(const String & name) const
{
if (enable_secure_identifiers)
{
bool is_secure_identifier = std::all_of(name.begin(), name.end(), [](char ch) { return std::isalnum(ch) || ch == '_'; });
if (!is_secure_identifier)
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,
"Not a secure identifier: `{}`, a secure identifier must contain only underscore and alphanumeric characters",
name);
}
}
}
void IAST::dumpTree(WriteBuffer & ostr, size_t indent) const
{
String indent_str(indent, '-');

View File

@ -202,6 +202,7 @@ public:
char nl_or_ws; /// Newline or whitespace.
LiteralEscapingStyle literal_escaping_style;
bool print_pretty_type_names;
bool enable_secure_identifiers;
explicit FormatSettings(
WriteBuffer & ostr_,
@ -211,7 +212,8 @@ public:
IdentifierQuotingStyle identifier_quoting_style_ = IdentifierQuotingStyle::Backticks,
bool show_secrets_ = true,
LiteralEscapingStyle literal_escaping_style_ = LiteralEscapingStyle::Regular,
bool print_pretty_type_names_ = false)
bool print_pretty_type_names_ = false,
bool enable_secure_identifiers_ = false)
: ostr(ostr_)
, one_line(one_line_)
, hilite(hilite_)
@ -221,6 +223,7 @@ public:
, nl_or_ws(one_line ? ' ' : '\n')
, literal_escaping_style(literal_escaping_style_)
, print_pretty_type_names(print_pretty_type_names_)
, enable_secure_identifiers(enable_secure_identifiers_)
{
}
@ -234,10 +237,12 @@ public:
, nl_or_ws(other.nl_or_ws)
, literal_escaping_style(other.literal_escaping_style)
, print_pretty_type_names(other.print_pretty_type_names)
, enable_secure_identifiers(other.enable_secure_identifiers)
{
}
void writeIdentifier(const String & name, bool ambiguous) const;
void checkIdentifier(const String & name) const;
};
/// State. For example, a set of nodes can be remembered, which we already walk through.

View File

@ -178,22 +178,6 @@ void Chunk::append(const Chunk & chunk, size_t from, size_t length)
setColumns(std::move(mutable_columns), rows);
}
void ChunkMissingValues::setBit(size_t column_idx, size_t row_idx)
{
RowsBitMask & mask = rows_mask_by_column_id[column_idx];
mask.resize(row_idx + 1);
mask[row_idx] = true;
}
const ChunkMissingValues::RowsBitMask & ChunkMissingValues::getDefaultsBitmask(size_t column_idx) const
{
static RowsBitMask none;
auto it = rows_mask_by_column_id.find(column_idx);
if (it != rows_mask_by_column_id.end())
return it->second;
return none;
}
void convertToFullIfConst(Chunk & chunk)
{
size_t num_rows = chunk.getNumRows();

View File

@ -153,28 +153,6 @@ public:
using AsyncInsertInfoPtr = std::shared_ptr<AsyncInsertInfo>;
/// Extension to support delayed defaults. AddingDefaultsProcessor uses it to replace missing values with column defaults.
class ChunkMissingValues : public ChunkInfoCloneable<ChunkMissingValues>
{
public:
ChunkMissingValues(const ChunkMissingValues & other) = default;
using RowsBitMask = std::vector<bool>; /// a bit per row for a column
const RowsBitMask & getDefaultsBitmask(size_t column_idx) const;
void setBit(size_t column_idx, size_t row_idx);
bool empty() const { return rows_mask_by_column_id.empty(); }
size_t size() const { return rows_mask_by_column_id.size(); }
void clear() { rows_mask_by_column_id.clear(); }
private:
using RowsMaskByColumnId = std::unordered_map<size_t, RowsBitMask>;
/// If rows_mask_by_column_id[column_id][row_id] is true related value in Block should be replaced with column default.
/// It could contain less columns and rows then related block.
RowsMaskByColumnId rows_mask_by_column_id;
};
/// Converts all columns to full serialization in chunk.
/// It's needed, when you have to access to the internals of the column,
/// or when you need to perform operation with two columns

View File

@ -6,6 +6,7 @@
#include <Processors/Formats/InputFormatErrorsLogger.h>
#include <Processors/SourceWithKeyCondition.h>
#include <Storages/MergeTree/KeyCondition.h>
#include <Core/BlockMissingValues.h>
namespace DB
@ -43,11 +44,7 @@ public:
virtual void setReadBuffer(ReadBuffer & in_);
virtual void resetReadBuffer() { in = nullptr; }
virtual const BlockMissingValues & getMissingValues() const
{
static const BlockMissingValues none;
return none;
}
virtual const BlockMissingValues * getMissingValues() const { return nullptr; }
/// Must be called from ParallelParsingInputFormat after readSuffix
ColumnMappingPtr getColumnMapping() const { return column_mapping; }

View File

@ -56,7 +56,10 @@ bool isParseError(int code)
}
IRowInputFormat::IRowInputFormat(Block header, ReadBuffer & in_, Params params_)
: IInputFormat(std::move(header), &in_), serializations(getPort().getHeader().getSerializations()), params(params_)
: IInputFormat(std::move(header), &in_)
, serializations(getPort().getHeader().getSerializations())
, params(params_)
, block_missing_values(getPort().getHeader().columns())
{
}

View File

@ -78,7 +78,7 @@ protected:
void logError();
const BlockMissingValues & getMissingValues() const override { return block_missing_values; }
const BlockMissingValues * getMissingValues() const override { return &block_missing_values; }
size_t getRowNum() const { return total_rows; }

View File

@ -24,7 +24,10 @@ namespace ErrorCodes
}
ArrowBlockInputFormat::ArrowBlockInputFormat(ReadBuffer & in_, const Block & header_, bool stream_, const FormatSettings & format_settings_)
: IInputFormat(header_, &in_), stream{stream_}, format_settings(format_settings_)
: IInputFormat(header_, &in_)
, stream(stream_)
, block_missing_values(getPort().getHeader().columns())
, format_settings(format_settings_)
{
}
@ -108,9 +111,9 @@ void ArrowBlockInputFormat::resetParser()
block_missing_values.clear();
}
const BlockMissingValues & ArrowBlockInputFormat::getMissingValues() const
const BlockMissingValues * ArrowBlockInputFormat::getMissingValues() const
{
return block_missing_values;
return &block_missing_values;
}
static std::shared_ptr<arrow::RecordBatchReader> createStreamReader(ReadBuffer & in)

View File

@ -25,7 +25,7 @@ public:
String getName() const override { return "ArrowBlockInputFormat"; }
const BlockMissingValues & getMissingValues() const override;
const BlockMissingValues * getMissingValues() const override;
size_t getApproxBytesReadForChunk() const override { return approx_bytes_read_for_chunk; }

View File

@ -9,6 +9,7 @@
#include <Core/Block.h>
#include <arrow/table.h>
#include <Formats/FormatSettings.h>
#include <Core/BlockMissingValues.h>
namespace DB
{

View File

@ -84,6 +84,7 @@ JSONColumnsBlockInputFormatBase::JSONColumnsBlockInputFormatBase(
, fields(header_.getNamesAndTypes())
, serializations(header_.getSerializations())
, reader(std::move(reader_))
, block_missing_values(getPort().getHeader().columns())
{
name_to_index = getPort().getHeader().getNamesToIndexesMap();
}

View File

@ -52,7 +52,7 @@ public:
void setReadBuffer(ReadBuffer & in_) override;
const BlockMissingValues & getMissingValues() const override { return block_missing_values; }
const BlockMissingValues * getMissingValues() const override { return &block_missing_values; }
size_t getApproxBytesReadForChunk() const override { return approx_bytes_read_for_chunk; }

View File

@ -23,7 +23,10 @@ public:
0,
settings,
settings.defaults_for_omitted_fields ? &block_missing_values : nullptr))
, header(header_) {}
, header(header_)
, block_missing_values(header.columns())
{
}
String getName() const override { return "Native"; }
@ -56,7 +59,7 @@ public:
IInputFormat::setReadBuffer(in_);
}
const BlockMissingValues & getMissingValues() const override { return block_missing_values; }
const BlockMissingValues * getMissingValues() const override { return &block_missing_values; }
size_t getApproxBytesReadForChunk() const override { return approx_bytes_read_for_chunk; }

View File

@ -846,7 +846,10 @@ static void updateIncludeTypeIds(
}
NativeORCBlockInputFormat::NativeORCBlockInputFormat(ReadBuffer & in_, Block header_, const FormatSettings & format_settings_)
: IInputFormat(std::move(header_), &in_), format_settings(format_settings_), skip_stripes(format_settings.orc.skip_stripes)
: IInputFormat(std::move(header_), &in_)
, block_missing_values(getPort().getHeader().columns())
, format_settings(format_settings_)
, skip_stripes(format_settings.orc.skip_stripes)
{
}
@ -975,9 +978,9 @@ void NativeORCBlockInputFormat::resetParser()
block_missing_values.clear();
}
const BlockMissingValues & NativeORCBlockInputFormat::getMissingValues() const
const BlockMissingValues * NativeORCBlockInputFormat::getMissingValues() const
{
return block_missing_values;
return &block_missing_values;
}
NativeORCSchemaReader::NativeORCSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_)

View File

@ -57,7 +57,7 @@ public:
void resetParser() override;
const BlockMissingValues & getMissingValues() const override;
const BlockMissingValues * getMissingValues() const override;
size_t getApproxBytesReadForChunk() const override { return approx_bytes_read_for_chunk; }

View File

@ -23,7 +23,10 @@ namespace ErrorCodes
}
ORCBlockInputFormat::ORCBlockInputFormat(ReadBuffer & in_, Block header_, const FormatSettings & format_settings_)
: IInputFormat(std::move(header_), &in_), format_settings(format_settings_), skip_stripes(format_settings.orc.skip_stripes)
: IInputFormat(std::move(header_), &in_)
, block_missing_values(getPort().getHeader().columns())
, format_settings(format_settings_)
, skip_stripes(format_settings.orc.skip_stripes)
{
}
@ -86,9 +89,9 @@ void ORCBlockInputFormat::resetParser()
block_missing_values.clear();
}
const BlockMissingValues & ORCBlockInputFormat::getMissingValues() const
const BlockMissingValues * ORCBlockInputFormat::getMissingValues() const
{
return block_missing_values;
return &block_missing_values;
}

View File

@ -27,7 +27,7 @@ public:
void resetParser() override;
const BlockMissingValues & getMissingValues() const override;
const BlockMissingValues * getMissingValues() const override;
size_t getApproxBytesReadForChunk() const override { return approx_bytes_read_for_chunk; }

View File

@ -111,7 +111,12 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupPtr thread_grou
/// Variable chunk is moved, but it is not really used in the next iteration.
/// NOLINTNEXTLINE(bugprone-use-after-move, hicpp-invalid-access-moved)
unit.chunk_ext.chunk.emplace_back(std::move(chunk));
unit.chunk_ext.block_missing_values.emplace_back(parser.getMissingValues());
if (const auto * block_missing_values = parser.getMissingValues())
unit.chunk_ext.block_missing_values.emplace_back(*block_missing_values);
else
unit.chunk_ext.block_missing_values.emplace_back(chunk.getNumColumns());
size_t approx_chunk_size = input_format->getApproxBytesReadForChunk();
/// We could decompress data during file segmentation.
/// Correct chunk size using original segment size.

View File

@ -103,6 +103,7 @@ public:
, format_settings(params.format_settings)
, min_chunk_bytes(params.min_chunk_bytes)
, max_block_size(params.max_block_size)
, last_block_missing_values(getPort().getHeader().columns())
, is_server(params.is_server)
, pool(CurrentMetrics::ParallelParsingInputFormatThreads, CurrentMetrics::ParallelParsingInputFormatThreadsActive, CurrentMetrics::ParallelParsingInputFormatThreadsScheduled, params.max_threads)
{
@ -124,9 +125,9 @@ public:
throw Exception(ErrorCodes::LOGICAL_ERROR, "resetParser() is not allowed for {}", getName());
}
const BlockMissingValues & getMissingValues() const final
const BlockMissingValues * getMissingValues() const final
{
return last_block_missing_values;
return &last_block_missing_values;
}
void setSerializationHints(const SerializationInfoByName & hints) override
@ -195,7 +196,7 @@ private:
}
}
const BlockMissingValues & getMissingValues() const { return input_format->getMissingValues(); }
const BlockMissingValues * getMissingValues() const { return input_format->getMissingValues(); }
private:
const InputFormatPtr & input_format;

View File

@ -441,6 +441,7 @@ ParquetBlockInputFormat::ParquetBlockInputFormat(
, max_decoding_threads(max_decoding_threads_)
, min_bytes_for_seek(min_bytes_for_seek_)
, pending_chunks(PendingChunk::Compare { .row_group_first = format_settings_.parquet.preserve_order })
, previous_block_missing_values(getPort().getHeader().columns())
{
if (max_decoding_threads > 1)
pool = std::make_unique<ThreadPool>(CurrentMetrics::ParquetDecoderThreads, CurrentMetrics::ParquetDecoderThreadsActive, CurrentMetrics::ParquetDecoderThreadsScheduled, max_decoding_threads);
@ -680,23 +681,19 @@ void ParquetBlockInputFormat::decodeOneChunk(size_t row_group_batch_idx, std::un
// reached. Wake up read() instead.
condvar.notify_all();
};
auto get_pending_chunk = [&](size_t num_rows, Chunk chunk = {})
auto get_approx_original_chunk_size = [&](size_t num_rows)
{
size_t approx_chunk_original_size = static_cast<size_t>(std::ceil(
static_cast<double>(row_group_batch.total_bytes_compressed) / row_group_batch.total_rows * num_rows));
return PendingChunk{
.chunk = std::move(chunk),
.block_missing_values = {},
.chunk_idx = row_group_batch.next_chunk_idx,
.row_group_batch_idx = row_group_batch_idx,
.approx_original_chunk_size = approx_chunk_original_size
};
return static_cast<size_t>(std::ceil(static_cast<double>(row_group_batch.total_bytes_compressed) / row_group_batch.total_rows * num_rows));
};
if (!row_group_batch.record_batch_reader && !row_group_batch.native_record_reader)
initializeRowGroupBatchReader(row_group_batch_idx);
PendingChunk res;
PendingChunk res(getPort().getHeader().columns());
res.chunk_idx = row_group_batch.next_chunk_idx;
res.row_group_batch_idx = row_group_batch_idx;
if (format_settings.parquet.use_native_reader)
{
auto chunk = row_group_batch.native_record_reader->readChunk();
@ -706,9 +703,9 @@ void ParquetBlockInputFormat::decodeOneChunk(size_t row_group_batch_idx, std::un
return;
}
// TODO support defaults_for_omitted_fields feature when supporting nested columns
auto num_rows = chunk.getNumRows();
res = get_pending_chunk(num_rows, std::move(chunk));
/// TODO: support defaults_for_omitted_fields feature when supporting nested columns
res.approx_original_chunk_size = get_approx_original_chunk_size(chunk.getNumRows());
res.chunk = std::move(chunk);
}
else
{
@ -723,11 +720,11 @@ void ParquetBlockInputFormat::decodeOneChunk(size_t row_group_batch_idx, std::un
}
auto tmp_table = arrow::Table::FromRecordBatches({*batch});
res = get_pending_chunk((*tmp_table)->num_rows());
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
/// Otherwise fill the missing columns with zero values of its type.
BlockMissingValues * block_missing_values_ptr = format_settings.defaults_for_omitted_fields ? &res.block_missing_values : nullptr;
res.approx_original_chunk_size = get_approx_original_chunk_size((*tmp_table)->num_rows());
res.chunk = row_group_batch.arrow_column_to_ch_column->arrowTableToCHChunk(*tmp_table, (*tmp_table)->num_rows(), block_missing_values_ptr);
}
@ -841,9 +838,9 @@ void ParquetBlockInputFormat::resetParser()
IInputFormat::resetParser();
}
const BlockMissingValues & ParquetBlockInputFormat::getMissingValues() const
const BlockMissingValues * ParquetBlockInputFormat::getMissingValues() const
{
return previous_block_missing_values;
return &previous_block_missing_values;
}
ParquetSchemaReader::ParquetSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_)

View File

@ -63,7 +63,7 @@ public:
String getName() const override { return "ParquetBlockInputFormat"; }
const BlockMissingValues & getMissingValues() const override;
const BlockMissingValues * getMissingValues() const override;
size_t getApproxBytesReadForChunk() const override { return previous_approx_bytes_read_for_chunk; }
@ -226,6 +226,8 @@ private:
// Chunk ready to be delivered by read().
struct PendingChunk
{
explicit PendingChunk(size_t num_columns) : block_missing_values(num_columns) {}
Chunk chunk;
BlockMissingValues block_missing_values;
size_t chunk_idx; // within row group

View File

@ -57,6 +57,7 @@ ValuesBlockInputFormat::ValuesBlockInputFormat(
parser_type_for_column(num_columns, ParserType::Streaming),
attempts_to_deduce_template(num_columns), attempts_to_deduce_template_cached(num_columns),
rows_parsed_using_template(num_columns), templates(num_columns), types(header_.getDataTypes()), serializations(header_.getSerializations())
, block_missing_values(getPort().getHeader().columns())
{
}

View File

@ -40,7 +40,7 @@ public:
void setContext(const ContextPtr & context_) { context = Context::createCopy(context_); }
void setQueryParameters(const NameToNameMap & parameters);
const BlockMissingValues & getMissingValues() const override { return block_missing_values; }
const BlockMissingValues * getMissingValues() const override { return &block_missing_values; }
size_t getApproxBytesReadForChunk() const override { return approx_bytes_read_for_chunk; }

View File

@ -63,17 +63,20 @@ namespace DB
processed_stage,
max_block_size,
num_streams);
auto builder = plan.buildQueryPipeline(
QueryPlanOptimizationSettings::fromContext(context),
BuildQueryPipelineSettings::fromContext(context));
QueryPlanResourceHolder resources;
auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources);
query_pipeline = QueryPipeline(std::move(pipe));
executor = std::make_unique<PullingPipelineExecutor>(query_pipeline);
if (plan.isInitialized())
{
auto builder = plan.buildQueryPipeline(
QueryPlanOptimizationSettings::fromContext(context),
BuildQueryPipelineSettings::fromContext(context));
QueryPlanResourceHolder resources;
auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources);
query_pipeline = QueryPipeline(std::move(pipe));
executor = std::make_unique<PullingPipelineExecutor>(query_pipeline);
}
loop = true;
}
Chunk chunk;
if (executor->pull(chunk))
if (executor && executor->pull(chunk))
{
if (chunk)
{

View File

@ -149,8 +149,8 @@ void AddingDefaultsTransform::transform(Chunk & chunk)
if (column_defaults.empty())
return;
const BlockMissingValues & block_missing_values = input_format.getMissingValues();
if (block_missing_values.empty())
const auto * block_missing_values = input_format.getMissingValues();
if (!block_missing_values)
return;
const auto & header = getOutputPort().getHeader();
@ -167,7 +167,7 @@ void AddingDefaultsTransform::transform(Chunk & chunk)
if (evaluate_block.has(column.first))
{
size_t column_idx = res.getPositionByName(column.first);
if (block_missing_values.hasDefaultBits(column_idx))
if (block_missing_values->hasDefaultBits(column_idx))
evaluate_block.erase(column.first);
}
}
@ -193,7 +193,7 @@ void AddingDefaultsTransform::transform(Chunk & chunk)
size_t block_column_position = res.getPositionByName(column_name);
ColumnWithTypeAndName & column_read = res.getByPosition(block_column_position);
const auto & defaults_mask = block_missing_values.getDefaultsBitmask(block_column_position);
const auto & defaults_mask = block_missing_values->getDefaultsBitmask(block_column_position);
checkCalculated(column_read, column_def, defaults_mask.size());

View File

@ -35,6 +35,7 @@
#include <Parsers/queryToString.h>
#include <Storages/AlterCommands.h>
#include <Storages/IStorage.h>
#include <Storages/StorageFactory.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Common/typeid_cast.h>
@ -1365,6 +1366,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const
"Data type have to be specified for column {} to add", backQuote(column_name));
validateDataType(command.data_type, DataTypeValidationSettings(context->getSettingsRef()));
checkAllTypesAreAllowedInTable(NamesAndTypesList{{command.column_name, command.data_type}});
/// FIXME: Adding a new column of type Object(JSON) is broken.
/// Looks like there is something around default expression for this column (method `getDefault` is not implemented for the data type Object).
@ -1453,6 +1455,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const
if (command.data_type)
{
validateDataType(command.data_type, DataTypeValidationSettings(context->getSettingsRef()));
checkAllTypesAreAllowedInTable(NamesAndTypesList{{command.column_name, command.data_type}});
const GetColumnsOptions options(GetColumnsOptions::All);
const auto old_data_type = all_columns.getColumn(options, column_name).type;

View File

@ -60,6 +60,8 @@ namespace Setting
extern const SettingsBool materialize_statistics_on_insert;
extern const SettingsBool optimize_on_insert;
extern const SettingsBool throw_on_max_partitions_per_insert_block;
extern const SettingsUInt64 min_free_disk_bytes_to_perform_insert;
extern const SettingsDouble min_free_disk_ratio_to_perform_insert;
}
namespace ErrorCodes
@ -67,6 +69,7 @@ namespace ErrorCodes
extern const int ABORTED;
extern const int LOGICAL_ERROR;
extern const int TOO_MANY_PARTS;
extern const int NOT_ENOUGH_SPACE;
}
namespace
@ -560,6 +563,41 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl(
VolumePtr volume = data.getStoragePolicy()->getVolume(0);
VolumePtr data_part_volume = createVolumeFromReservation(reservation, volume);
const auto & global_settings = context->getSettingsRef();
const auto & data_settings = data.getSettings();
const UInt64 & min_bytes_to_perform_insert =
data_settings->min_free_disk_bytes_to_perform_insert.changed
? data_settings->min_free_disk_bytes_to_perform_insert
: global_settings[Setting::min_free_disk_bytes_to_perform_insert];
const Float64 & min_ratio_to_perform_insert =
data_settings->min_free_disk_ratio_to_perform_insert.changed
? data_settings->min_free_disk_ratio_to_perform_insert
: global_settings[Setting::min_free_disk_ratio_to_perform_insert];
if (min_bytes_to_perform_insert > 0 || min_ratio_to_perform_insert > 0.0)
{
const auto & disk = data_part_volume->getDisk();
const UInt64 & total_disk_bytes = disk->getTotalSpace().value_or(0);
const UInt64 & free_disk_bytes = disk->getAvailableSpace().value_or(0);
const UInt64 & min_bytes_from_ratio = static_cast<UInt64>(min_ratio_to_perform_insert * total_disk_bytes);
const UInt64 & needed_free_bytes = std::max(min_bytes_to_perform_insert, min_bytes_from_ratio);
if (needed_free_bytes > free_disk_bytes)
{
throw Exception(
ErrorCodes::NOT_ENOUGH_SPACE,
"Could not perform insert: less than {} free bytes left in the disk space ({}). "
"Configure this limit with user settings {} or {}",
needed_free_bytes,
free_disk_bytes,
"min_free_disk_bytes_to_perform_insert",
"min_free_disk_ratio_to_perform_insert");
}
}
auto new_data_part = data.getDataPartBuilder(part_name, data_part_volume, part_dir)
.withPartFormat(data.choosePartFormat(expected_size, block.rows()))
.withPartInfo(new_part_info)
@ -571,8 +609,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl(
if (data.storage_settings.get()->assign_part_uuids)
new_data_part->uuid = UUIDHelpers::generateV4();
const auto & data_settings = data.getSettings();
SerializationInfo::Settings settings{data_settings->ratio_of_defaults_for_sparse_serialization, true};
SerializationInfoByName infos(columns, settings);
infos.add(block);

View File

@ -100,7 +100,10 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeInd
size_t rows_to_read = field_rows.safeGet<size_t>();
if (rows_to_read == 0)
{
block.clear();
return;
}
size_t num_columns = block.columns();

View File

@ -101,6 +101,8 @@ struct Settings;
M(Bool, optimize_row_order, false, "Allow reshuffling of rows during part inserts and merges to improve the compressibility of the new part", 0) \
M(Bool, use_adaptive_write_buffer_for_dynamic_subcolumns, true, "Allow to use adaptive writer buffers during writing dynamic subcolumns to reduce memory usage", 0) \
M(UInt64, adaptive_write_buffer_initial_size, 16 * 1024, "Initial size of an adaptive write buffer", 0) \
M(UInt64, min_free_disk_bytes_to_perform_insert, 0, "Minimum free disk space bytes to perform an insert.", 0) \
M(Double, min_free_disk_ratio_to_perform_insert, 0.0, "Minimum free disk space ratio to perform an insert.", 0) \
\
/* Part removal settings. */ \
M(UInt64, simultaneous_parts_removal_limit, 0, "Maximum number of parts to remove during one CleanupThread iteration (0 means unlimited).", 0) \

View File

@ -723,16 +723,21 @@ Strings PostgreSQLReplicationHandler::getTableAllowedColumns(const std::string &
if (tables_list.empty())
return result;
size_t table_pos = tables_list.find(table_name);
if (table_pos == std::string::npos)
size_t table_pos = 0;
while (true)
{
return result;
table_pos = tables_list.find(table_name, table_pos + 1);
if (table_pos == std::string::npos)
return result;
if (table_pos + table_name.length() + 1 > tables_list.length())
return result;
if (tables_list[table_pos + table_name.length() + 1] == '(' ||
tables_list[table_pos + table_name.length() + 1] == ',' ||
tables_list[table_pos + table_name.length() + 1] == ' '
)
break;
}
if (table_pos + table_name.length() + 1 > tables_list.length())
{
return result;
}
String column_list = tables_list.substr(table_pos + table_name.length() + 1);
column_list.erase(std::remove(column_list.begin(), column_list.end(), '"'), column_list.end());
boost::trim(column_list);

View File

@ -28,11 +28,11 @@ namespace ErrorCodes
/// Some types are only for intermediate values of expressions and cannot be used in tables.
static void checkAllTypesAreAllowedInTable(const NamesAndTypesList & names_and_types)
void checkAllTypesAreAllowedInTable(const NamesAndTypesList & names_and_types)
{
for (const auto & elem : names_and_types)
if (elem.type->cannotBeStoredInTables())
throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_TABLES, "Data type {} cannot be used in tables", elem.type->getName());
throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_TABLES, "Data type {} of column '{}' cannot be used in tables", elem.type->getName(), elem.name);
}

View File

@ -135,4 +135,6 @@ private:
Storages storages;
};
void checkAllTypesAreAllowedInTable(const NamesAndTypesList & names_and_types);
}

View File

@ -517,6 +517,10 @@ void StorageMaterializedView::alter(
/// We need to copy the target table's columns (after checkTargetTableHasQueryOutputColumns() they can be still different - e.g. the data types of those columns can differ).
new_metadata.columns = target_table_metadata->columns;
}
else
{
checkAllTypesAreAllowedInTable(new_metadata.getColumns().getAll());
}
DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(local_context, table_id, new_metadata);
setInMemoryMetadata(new_metadata);

View File

@ -427,7 +427,7 @@ def _mark_success_action(
# do nothing, exit without failure
print(f"ERROR: no status file for job [{job}]")
if job_config.run_by_label or not job_config.has_digest():
if job_config.run_by_labels or not job_config.has_digest():
print(f"Job [{job}] has no digest or run by label in CI - do not cache")
else:
if pr_info.is_master:

View File

@ -261,7 +261,7 @@ class CI:
compiler="clang-18",
package_type="fuzzers",
),
run_by_label=Tags.libFuzzer,
run_by_labels=[Tags.libFuzzer],
),
JobNames.BUILD_CHECK: CommonJobConfigs.BUILD_REPORT.with_properties(),
JobNames.INSTALL_TEST_AMD: CommonJobConfigs.INSTALL_TEST.with_properties(
@ -479,13 +479,13 @@ class CI:
),
JobNames.JEPSEN_KEEPER: JobConfig(
required_builds=[BuildNames.BINARY_RELEASE],
run_by_label="jepsen-test",
run_by_labels=[Labels.JEPSEN_TEST],
run_command="jepsen_check.py keeper",
runner_type=Runners.STYLE_CHECKER_ARM,
),
JobNames.JEPSEN_SERVER: JobConfig(
required_builds=[BuildNames.BINARY_RELEASE],
run_by_label="jepsen-test",
run_by_labels=[Labels.JEPSEN_TEST],
run_command="jepsen_check.py server",
runner_type=Runners.STYLE_CHECKER_ARM,
),
@ -495,7 +495,7 @@ class CI:
JobNames.PERFORMANCE_TEST_ARM64: CommonJobConfigs.PERF_TESTS.with_properties(
required_builds=[BuildNames.PACKAGE_AARCH64],
num_batches=4,
run_by_label="pr-performance",
run_by_labels=[Labels.PR_PERFORMANCE],
runner_type=Runners.FUNC_TESTER_ARM,
),
JobNames.SQLANCER: CommonJobConfigs.SQLLANCER_TEST.with_properties(
@ -520,7 +520,7 @@ class CI:
),
JobNames.LIBFUZZER_TEST: JobConfig(
required_builds=[BuildNames.FUZZERS],
run_by_label=Tags.libFuzzer,
run_by_labels=[Tags.libFuzzer],
timeout=10800,
run_command='libfuzzer_test_check.py "$CHECK_NAME"',
runner_type=Runners.STYLE_CHECKER,
@ -557,7 +557,7 @@ class CI:
runner_type=Runners.STYLE_CHECKER_ARM,
),
JobNames.BUGFIX_VALIDATE: JobConfig(
run_by_label="pr-bugfix",
run_by_labels=[Labels.PR_BUGFIX, Labels.PR_CRITICAL_BUGFIX],
run_command="bugfix_validate_check.py",
timeout=2400,
runner_type=Runners.STYLE_CHECKER,

View File

@ -22,6 +22,7 @@ class Labels:
PR_CHERRYPICK = "pr-cherrypick"
PR_CI = "pr-ci"
PR_FEATURE = "pr-feature"
PR_PERFORMANCE = "pr-performance"
PR_SYNCED_TO_CLOUD = "pr-synced-to-cloud"
PR_SYNC_UPSTREAM = "pr-sync-upstream"
RELEASE = "release"
@ -335,7 +336,7 @@ class JobConfig:
# sets number of batches for a multi-batch job
num_batches: int = 1
# label that enables job in CI, if set digest isn't used
run_by_label: str = ""
run_by_labels: List[str] = field(default_factory=list)
# to run always regardless of the job digest or/and label
run_always: bool = False
# disables CI await for a given job
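Switching the field from a single string to a list relies on `field(default_factory=list)`, since `@dataclass` forbids a bare mutable default such as `= []`. A stripped-down sketch of the pattern is below; every other `JobConfig` field and the surrounding CI machinery are omitted here.

```python
# Sketch of the dataclass pattern used above; only the relevant fields are shown.
from dataclasses import dataclass, field
from typing import List


@dataclass
class JobConfig:
    # A mutable default like `= []` is rejected by @dataclass, so a factory is
    # used instead; each instance gets its own independent list.
    run_by_labels: List[str] = field(default_factory=list)
    run_always: bool = False


jepsen = JobConfig(run_by_labels=["jepsen-test"])
default = JobConfig()
assert default.run_by_labels == [] and default.run_by_labels is not jepsen.run_by_labels
```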

View File

@ -151,10 +151,10 @@ class CiSettings:
return True
return False
if job_config.run_by_label:
if job_config.run_by_label in labels and is_pr:
if job_config.run_by_labels:
if set(job_config.run_by_labels).intersection(labels) and is_pr:
print(
f"Job [{job}] selected by GH label [{job_config.run_by_label}] - pass"
f"Job [{job}] selected by GH label [{job_config.run_by_labels}] - pass"
)
return True
return False
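With `run_by_labels`, a job is selected when any of its labels is present on the PR, using a set intersection instead of a single membership test. A minimal sketch of that rule follows; the helper name and the way `pr_labels` and `is_pr` are passed in are illustrative, not the real `CiSettings` interface.

```python
# Illustrative helper only; the label strings come from the hunks above, but
# the function signature is not the actual CiSettings API.
from typing import Iterable, List


def job_selected_by_labels(run_by_labels: List[str], pr_labels: Iterable[str], is_pr: bool) -> bool:
    """A job is selected when it declares labels and at least one of them is set on the PR."""
    if not run_by_labels or not is_pr:
        return False
    return bool(set(run_by_labels) & set(pr_labels))


assert job_selected_by_labels(["pr-bugfix"], {"pr-bugfix", "pr-feature"}, is_pr=True)
assert not job_selected_by_labels(["jepsen-test"], {"pr-feature"}, is_pr=True)
```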

View File

@ -304,7 +304,7 @@ class TestCIConfig(unittest.TestCase):
for job, config in CI.JOB_CONFIGS.items():
if (
CI.is_build_job(job)
and not config.run_by_label
and not config.run_by_labels
and job not in expected_jobs_to_do
):
# expected to run all builds jobs
@ -358,7 +358,7 @@ class TestCIConfig(unittest.TestCase):
continue
if config.release_only:
continue
if config.run_by_label:
if config.run_by_labels:
continue
expected_jobs_to_do.append(job)
@ -391,7 +391,7 @@ class TestCIConfig(unittest.TestCase):
for job, config in CI.JOB_CONFIGS.items():
if config.pr_only:
continue
if config.run_by_label:
if config.run_by_labels:
continue
if job in CI.MQ_JOBS:
continue

View File


@ -173,9 +173,8 @@ class TestCIOptions(unittest.TestCase):
job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER)
for job in _TEST_JOB_LIST
}
jobs_configs["fuzzers"].run_by_label = (
"TEST_LABEL" # check "fuzzers" appears in the result due to the label
)
# check "fuzzers" appears in the result due to the label
jobs_configs["fuzzers"].run_by_labels = ["TEST_LABEL"]
jobs_configs["Integration tests (asan)"].release_only = (
True # still must be included as it's set with include keywords
)
@ -222,7 +221,7 @@ class TestCIOptions(unittest.TestCase):
}
jobs_configs["Style check"].release_only = True
jobs_configs["Fast test"].pr_only = True
jobs_configs["fuzzers"].run_by_label = "TEST_LABEL"
jobs_configs["fuzzers"].run_by_labels = ["TEST_LABEL"]
# no settings are set
filtered_jobs = list(
CiSettings().apply(
@ -311,9 +310,8 @@ class TestCIOptions(unittest.TestCase):
job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER)
for job in _TEST_JOB_LIST
}
jobs_configs["fuzzers"].run_by_label = (
"TEST_LABEL" # check "fuzzers" does not appears in the result
)
# check "fuzzers" does not appears in the result
jobs_configs["fuzzers"].run_by_labels = ["TEST_LABEL"]
jobs_configs["Integration tests (asan)"].release_only = True
filtered_jobs = list(
ci_options.apply(

View File

@ -14,7 +14,7 @@ Don't use Docker from your system repository.
* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python3-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev libkrb5-dev python3-dev`
* [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install:
* [docker compose](https://docs.docker.com/compose/) and additional python libraries. To install:
```bash
sudo -H pip install \
@ -24,7 +24,6 @@ sudo -H pip install \
confluent-kafka \
dicttoxml \
docker \
docker-compose \
grpcio \
grpcio-tools \
kafka-python \
@ -48,7 +47,7 @@ sudo -H pip install \
nats-py
```
(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-protobuf python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio`
(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose-v2 python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-protobuf python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio`
Some tests have other dependencies, e.g. spark. See docker/test/integration/runner/Dockerfile for how to install those. See docker/test/integration/runner/dockerd-entrypoint.sh for environment variables that need to be set (e.g. JAVA_PATH).
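These docs now target the Compose V2 plugin (`docker compose`) rather than the standalone `docker-compose` binary. As a rough sketch of how a test harness could cope with both during the transition (the probe-and-fallback policy here is an assumption, not what the ClickHouse runner actually does):

```python
# Rough sketch: prefer the `docker compose` plugin, fall back to the legacy
# binary. The command names are real; the fallback policy is an assumption.
import shutil
import subprocess
from typing import List


def compose_command() -> List[str]:
    """Return the Compose invocation available on this machine."""
    try:
        probe = subprocess.run(
            ["docker", "compose", "version"], capture_output=True, check=False
        )
        if probe.returncode == 0:
            return ["docker", "compose"]
    except FileNotFoundError:
        pass
    if shutil.which("docker-compose"):
        return ["docker-compose"]
    raise RuntimeError("neither the docker compose plugin nor docker-compose is available")


# Example: bring up whatever compose file sits in the current directory.
subprocess.run(compose_command() + ["up", "-d"], check=True)
```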

View File

@ -1,5 +1,3 @@
version: '2.3'
services:
azurite1:
image: mcr.microsoft.com/azure-storage/azurite

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
cassandra1:
image: cassandra:4.0

View File

@ -1,5 +1,4 @@
version: '2.3'
# Used to pre-pull images with docker-compose
# Used to pre-pull images with docker compose
services:
clickhouse1:
image: clickhouse/integration-test

View File

@ -1,5 +1,3 @@
version: "2.3"
services:
coredns:
image: coredns/coredns:1.9.3 # :latest broke this test

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
dotnet1:
image: clickhouse/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest}

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
hdfs1:
image: prasanthj/docker-hadoop:2.6.0

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
hdfs1:
image: lgboustc/hive_test:v2.0

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
bridge1:
image: clickhouse/jdbc-bridge
@ -24,4 +23,4 @@ services:
volumes:
- type: ${JDBC_BRIDGE_FS:-tmpfs}
source: ${JDBC_BRIDGE_LOGS:-}
target: /app/logs
target: /app/logs

View File

@ -1,5 +1,3 @@
version: '2.3'
services:
kafka_zookeeper:
image: zookeeper:3.4.9

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
zoo1:
image: ${image:-clickhouse/integration-test}

View File

@ -1,5 +1,3 @@
version: '2.3'
services:
kerberizedhdfs1:
cap_add:

View File

@ -1,5 +1,3 @@
version: '2.3'
services:
kafka_kerberized_zookeeper:
image: confluentinc/cp-zookeeper:5.2.0

View File

@ -1,5 +1,3 @@
version: '2.3'
services:
kerberoskdc:
image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
openldap:
image: bitnami/openldap:2.6.6

View File

@ -1,5 +1,3 @@
version: '2.3'
services:
minio1:
image: minio/minio:RELEASE.2024-07-31T05-46-26Z

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
mongo1:
image: mongo:6.0

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
mysql57:
image: mysql:5.7

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
mysql80:
image: mysql:8.0

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
mysql_client:
image: mysql:8.0

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
mysql2:
image: mysql:8.0

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
golang1:
image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
java1:
image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
mysqljs1:
image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
php1:
image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
nats1:
image: nats

View File

@ -1,4 +1,3 @@
version: '2.3'
networks:
default:
driver: bridge

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
# nginx server to host static files.
# Accepts only PUT data by test.com/path and GET already existing data on test.com/path.

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
postgres1:
image: postgres

View File

@ -1,4 +1,3 @@
version: '2.3'
services:
postgres2:
image: postgres
@ -41,4 +40,4 @@ services:
volumes:
- type: ${POSTGRES_LOGS_FS:-tmpfs}
source: ${POSTGRES4_DIR:-}
target: /postgres/
target: /postgres/

Some files were not shown because too many files have changed in this diff