Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit f56269243e: Merge branch 'master' into random_timezone_for_stateless_tests

.github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -2,25 +2,23 @@ I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla

 Changelog category (leave one):

 - New Feature
-- Bug Fix
 - Improvement
+- Bug Fix
 - Performance Improvement
 - Backward Incompatible Change
 - Build/Testing/Packaging Improvement
 - Documentation (changelog entry is not required)
 - Other
 - Not for changelog (changelog entry is not required)

 Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):

 ...

 Detailed description / Documentation draft:

 ...

 By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add a brief documentation draft above, or add documentation right into your patch as Markdown files in the [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.

 If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.
@@ -13,3 +13,6 @@ ClickHouse® is an open-source column-oriented database management system that a

 * [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
 * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
 * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
+
+## Upcoming Events
+
+* [ClickHouse Meetup by ByteDance (online)](https://www.meetup.com/ByteDanceDev-group/events/279543467/) on 23 July 2021.
@@ -18,6 +18,8 @@

 #define DATE_LUT_MAX (0xFFFFFFFFU - 86400)
 #define DATE_LUT_MAX_DAY_NUM 0xFFFF
+/// Max int value of Date32, DATE LUT cache size minus daynum_offset_epoch
+#define DATE_LUT_MAX_EXTEND_DAY_NUM (DATE_LUT_SIZE - 16436)

 /// A constant to add to time_t so every supported time point becomes non-negative and still has the same remainder of division by 3600.
 /// If we treat "remainder of division" operation in the sense of modular arithmetic (not like in C++).

@@ -270,6 +272,8 @@ public:

     auto getOffsetAtStartOfEpoch() const { return offset_at_start_of_epoch; }
     auto getTimeOffsetAtStartOfLUT() const { return offset_at_start_of_lut; }

+    auto getDayNumOffsetEpoch() const { return daynum_offset_epoch; }
+
     /// All functions below are thread-safe; arguments are not checked.

     inline ExtendedDayNum toDayNum(ExtendedDayNum d) const
@@ -926,15 +930,17 @@ public:
     {
         if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31))
             return LUTIndex(0);

-        return LUTIndex{years_months_lut[(year - DATE_LUT_MIN_YEAR) * 12 + month - 1] + day_of_month - 1};
+        auto year_lut_index = (year - DATE_LUT_MIN_YEAR) * 12 + month - 1;
+        UInt32 index = years_months_lut[year_lut_index].toUnderType() + day_of_month - 1;
+        /// When date is out of range, default value is DATE_LUT_SIZE - 1 (2283-11-11)
+        return LUTIndex{std::min(index, static_cast<UInt32>(DATE_LUT_SIZE - 1))};
     }

     /// Create DayNum from year, month, day of month.
-    inline ExtendedDayNum makeDayNum(Int16 year, UInt8 month, UInt8 day_of_month) const
+    inline ExtendedDayNum makeDayNum(Int16 year, UInt8 month, UInt8 day_of_month, Int32 default_error_day_num = 0) const
     {
         if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31))
-            return ExtendedDayNum(0);
+            return ExtendedDayNum(default_error_day_num);

         return toDayNum(makeLUTIndex(year, month, day_of_month));
     }
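A short, hedged sketch of the new behavior may help here (it assumes the ClickHouse DateLUT headers; the values are illustrative): out-of-range lookups now saturate instead of always collapsing to index 0, and `makeDayNum` can report a caller-chosen sentinel.

```cpp
const auto & lut = DateLUT::instance();
auto ok  = lut.makeDayNum(2021, 7, 15);       /// a valid date
auto bad = lut.makeDayNum(9999, 1, 1, -1);    /// out of range: returns ExtendedDayNum(-1)
```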
@@ -1091,9 +1097,9 @@ public:
         return lut[new_index].date + time;
     }

-    inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int64 delta) const
+    inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int32 delta) const
     {
-        return addDays(t, delta * 7);
+        return addDays(t, static_cast<Int64>(delta) * 7);
     }

     inline UInt8 saturateDayOfMonth(Int16 year, UInt8 month, UInt8 day_of_month) const

@@ -1158,14 +1164,14 @@ public:
         return toDayNum(addMonthsIndex(d, delta));
     }

-    inline Time NO_SANITIZE_UNDEFINED addQuarters(Time t, Int64 delta) const
+    inline Time NO_SANITIZE_UNDEFINED addQuarters(Time t, Int32 delta) const
     {
-        return addMonths(t, delta * 3);
+        return addMonths(t, static_cast<Int64>(delta) * 3);
     }

-    inline ExtendedDayNum addQuarters(ExtendedDayNum d, Int64 delta) const
+    inline ExtendedDayNum addQuarters(ExtendedDayNum d, Int32 delta) const
     {
-        return addMonths(d, delta * 3);
+        return addMonths(d, static_cast<Int64>(delta) * 3);
     }

     template <typename DateOrTime>
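The cast before the multiplication is the important detail: with a 32-bit `delta`, `delta * 7` would be evaluated in 32-bit arithmetic and could overflow before ever reaching `addDays`. A minimal, self-contained sketch (not the ClickHouse code itself):

```cpp
#include <cstdint>
#include <iostream>

/// delta * 7 is computed in 32-bit arithmetic; for large deltas this is
/// signed overflow (undefined behavior) before the widening to 64 bits.
int64_t weeksToDaysNaive(int32_t delta) { return delta * 7; }

/// Widening first keeps the multiplication exact in 64 bits.
int64_t weeksToDaysWidened(int32_t delta) { return static_cast<int64_t>(delta) * 7; }

int main()
{
    int32_t delta = 400000000;                        /// illustrative large week count
    std::cout << weeksToDaysWidened(delta) << '\n';   /// 2800000000, correct
    /// weeksToDaysNaive(delta) would overflow int32 here.
}
```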
@@ -70,6 +70,14 @@ public:
         m_day = values.day_of_month;
     }

+    explicit LocalDate(ExtendedDayNum day_num)
+    {
+        const auto & values = DateLUT::instance().getValues(day_num);
+        m_year = values.year;
+        m_month = values.month;
+        m_day = values.day_of_month;
+    }
+
     LocalDate(unsigned short year_, unsigned char month_, unsigned char day_)
         : m_year(year_), m_month(month_), m_day(day_)
     {

@@ -98,6 +106,12 @@ public:
         return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
     }

+    ExtendedDayNum getExtenedDayNum() const
+    {
+        const auto & lut = DateLUT::instance();
+        return ExtendedDayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
+    }
+
     operator DayNum() const
     {
         return getDayNum();
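A hedged round-trip sketch of the new `ExtendedDayNum` API (the include paths are assumed from the repository layout; `getExtenedDayNum` is spelled as in the patch):

```cpp
#include <common/LocalDate.h>   /// assumed include paths
#include <common/DateLUT.h>

/// Construct from an extended day number (counted from 1970-01-01) and convert back.
LocalDate date{ExtendedDayNum(18000)};
ExtendedDayNum round_trip = date.getExtenedDayNum();   /// same day number again
```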
@@ -69,7 +69,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
     }

     std::string line;
-    if (!getline(in, line).good())
+    if (getline(in, line).bad())
     {
         rx.print("Cannot read from %s (for conversion): %s\n",
             path.c_str(), errnoToString(errno).c_str());

@@ -78,7 +78,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)

     /// This is the marker of the date, no need to convert.
     static char const REPLXX_TIMESTAMP_PATTERN[] = "### dddd-dd-dd dd:dd:dd.ddd";
-    if (line.starts_with("### ") && line.size() == strlen(REPLXX_TIMESTAMP_PATTERN))
+    if (line.empty() || (line.starts_with("### ") && line.size() == strlen(REPLXX_TIMESTAMP_PATTERN)))
     {
         return;
     }
base/common/removeDuplicates.h (new file, 24 lines)

@@ -0,0 +1,24 @@
+#pragma once
+#include <algorithm>    /// for std::find; assumed addition, not in the original patch
+#include <vector>
+
+/// Removes duplicates from a container without changing the order of its elements.
+/// Keeps the last occurrence of each element.
+/// Should NOT be used for containers with a lot of elements because it has O(N^2) complexity.
+template <typename T>
+void removeDuplicatesKeepLast(std::vector<T> & vec)
+{
+    auto begin = vec.begin();
+    auto end = vec.end();
+    auto new_begin = end;
+    for (auto current = end; current != begin;)
+    {
+        --current;
+        if (std::find(new_begin, end, *current) == end)
+        {
+            --new_begin;
+            if (new_begin != current)
+                *new_begin = *current;
+        }
+    }
+    vec.erase(begin, new_begin);
+}
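A hedged usage sketch of the helper (the include path is assumed): note that it keeps the *last* occurrence of each element while preserving the relative order of what survives.

```cpp
#include <common/removeDuplicates.h>   /// assumed include path
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> v{1, 2, 1, 3, 2};
    removeDuplicatesKeepLast(v);
    for (int x : v)
        std::cout << x << ' ';   /// prints: 1 3 2
}
```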
@@ -115,6 +115,8 @@ set(S2_SRCS

 add_library(s2 ${S2_SRCS})

+set_property(TARGET s2 PROPERTY CXX_STANDARD 11)
+
 if (OPENSSL_FOUND)
     target_link_libraries(s2 PRIVATE ${OPENSSL_LIBRARIES})
 endif()
@@ -27,7 +27,7 @@ RUN apt-get update \

 # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
 # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
 # Significantly increases deb packaging speed and is compatible with old systems
-RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
+RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
     && chmod +x dpkg-deb \
     && cp dpkg-deb /usr/bin
@@ -2,7 +2,7 @@
 FROM yandex/clickhouse-deb-builder

 RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
-    && wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \
+    && wget -nv -O /tmp/arrow-keyring.deb "https://apache.jfrog.io/artifactory/arrow/ubuntu/apache-arrow-apt-source-latest-${CODENAME}.deb" \
     && dpkg -i /tmp/arrow-keyring.deb

 # Libraries from OS are only needed to test the "unbundled" build (that is not used in production).
@@ -72,7 +72,10 @@ do

     if [ "$DO_CHOWN" = "1" ]; then
         # ensure proper directories permissions
+        # but skip it if the directory already has proper permissions, because recursive chown may be slow
+        if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then
             chown -R "$USER:$GROUP" "$dir"
+        fi
     elif ! $gosu test -d "$dir" -a -w "$dir" -a -r "$dir"; then
         echo "Necessary directory '$dir' isn't accessible by user with id '$USER'"
         exit 1

@@ -161,6 +164,10 @@ fi

 # if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments
 if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
+    # Watchdog is launched by default, but does not send SIGINT to the main process,
+    # so the container can't be finished by ctrl+c
+    CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
+    export CLICKHOUSE_WATCHDOG_ENABLE
     exec $gosu /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
 fi
@@ -27,7 +27,7 @@ RUN apt-get update \

 # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
 # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
 # Significantly increases deb packaging speed and is compatible with old systems
-RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
+RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
     && chmod +x dpkg-deb \
     && cp dpkg-deb /usr/bin

@@ -27,7 +27,7 @@ RUN apt-get update \

 # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
 # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
 # Significantly increases deb packaging speed and is compatible with old systems
-RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
+RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
     && chmod +x dpkg-deb \
     && cp dpkg-deb /usr/bin
@@ -65,7 +65,7 @@ RUN apt-get update \
         unixodbc \
         --yes --no-install-recommends

-RUN pip3 install numpy scipy pandas
+RUN pip3 install numpy scipy pandas Jinja2

 # This symlink is required by gcc to find the lld linker
 RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
@@ -194,6 +194,10 @@ continue
     jobs
     pstree -aspgT

+    server_exit_code=0
+    wait $server_pid || server_exit_code=$?
+    echo "Server exit code is $server_exit_code"
+
     # Make files with status and description we'll show for this check on Github.
     task_exit_code=$fuzzer_exit_code
     if [ "$server_died" == 1 ]
@@ -76,6 +76,7 @@ RUN python3 -m pip install \
     pytest \
     pytest-timeout \
     pytest-xdist \
+    pytest-repeat \
     redis \
     tzlocal \
     urllib3 \
@@ -2,7 +2,7 @@ version: '2.3'
 services:
     postgres1:
         image: postgres
-        command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all"]
+        command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
         restart: always
         expose:
             - ${POSTGRES_PORT}
@@ -1196,7 +1196,7 @@ create table changes engine File(TSV, 'metrics/changes.tsv') as
             if(left > right, left / right, right / left) times_diff
         from metrics
         group by metric
-        having abs(diff) > 0.05 and isFinite(diff)
+        having abs(diff) > 0.05 and isFinite(diff) and isFinite(times_diff)
     )
     order by diff desc
     ;
@@ -32,7 +32,7 @@ RUN apt-get update -y \
         postgresql-client \
         sqlite3

-RUN pip3 install numpy scipy pandas
+RUN pip3 install numpy scipy pandas Jinja2

 RUN mkdir -p /tmp/clickhouse-odbc-tmp \
     && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
@@ -12,7 +12,7 @@ UNKNOWN_SIGN = "[ UNKNOWN "
 SKIPPED_SIGN = "[ SKIPPED "
 HUNG_SIGN = "Found hung queries in processlist"

-NO_TASK_TIMEOUT_SIGN = "All tests have finished"
+NO_TASK_TIMEOUT_SIGNS = ["All tests have finished", "No tests were run"]

 RETRIES_SIGN = "Some tests were restarted"

@@ -29,7 +29,7 @@ def process_test_log(log_path):
     with open(log_path, 'r') as test_file:
         for line in test_file:
             line = line.strip()
-            if NO_TASK_TIMEOUT_SIGN in line:
+            if any(s in line for s in NO_TASK_TIMEOUT_SIGNS):
                 task_timeout = False
             if HUNG_SIGN in line:
                 hung = True

@@ -80,6 +80,7 @@ def process_result(result_path):
     if result_path and os.path.exists(result_path):
         total, skipped, unknown, failed, success, hung, task_timeout, retries, test_results = process_test_log(result_path)
         is_flacky_check = 1 < int(os.environ.get('NUM_TRIES', 1))
+        logging.info("Is flacky check: %s", is_flacky_check)
         # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
         # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
         if failed != 0 or unknown != 0 or (success == 0 and (not is_flacky_check)):
@@ -123,7 +123,7 @@ For installing CMake and Ninja on Mac OS X first install Homebrew and then insta

     /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
     brew install cmake ninja

-Next, check the version of CMake: `cmake --version`. If it is below 3.3, you should install a newer version from the website: https://cmake.org/download/.
+Next, check the version of CMake: `cmake --version`. If it is below 3.12, you should install a newer version from the website: https://cmake.org/download/.

 ## Optional External Libraries {#optional-external-libraries}
@@ -82,6 +82,8 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([

 - If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not included into the result set.

+- The result includes column comments if they exist in the MySQL database tables.
+
 ### Index Conversion {#index-conversion}

 MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables.
@@ -30,21 +30,25 @@ Other common parameters are inherited from clickhouse-server config (`listen_hos

 Internal coordination settings are located in the `<keeper_server>.<coordination_settings>` section:

-- `operation_timeout_ms` — timeout for a single client operation
-- `session_timeout_ms` — timeout for client session
-- `dead_session_check_period_ms` — how often clickhouse-keeper check dead sessions and remove them
-- `heart_beat_interval_ms` — how often a clickhouse-keeper leader will send heartbeats to followers
-- `election_timeout_lower_bound_ms` — if follower didn't receive heartbeats from the leader in this interval, then it can initiate leader election
-- `election_timeout_upper_bound_ms` — if follower didn't receive heartbeats from the leader in this interval, then it must initiate leader election
-- `rotate_log_storage_interval` — how many logs to store in a single file
-- `reserved_log_items` — how many coordination logs to store before compaction
-- `snapshot_distance` — how often clickhouse-keeper will create new snapshots (in the number of logs)
-- `snapshots_to_keep` — how many snapshots to keep
-- `stale_log_gap` — the threshold when leader consider follower as stale and send snapshot to it instead of logs
-- `force_sync` — call `fsync` on each write to coordination log
-- `raft_logs_level` — text logging level about coordination (trace, debug, and so on)
-- `shutdown_timeout` — wait to finish internal connections and shutdown
-- `startup_timeout` — if the server doesn't connect to other quorum participants in the specified timeout it will terminate
+- `operation_timeout_ms` — timeout for a single client operation (default: 10000)
+- `session_timeout_ms` — timeout for a client session (default: 30000)
+- `dead_session_check_period_ms` — how often clickhouse-keeper checks dead sessions and removes them (default: 500)
+- `heart_beat_interval_ms` — how often a clickhouse-keeper leader sends heartbeats to followers (default: 500)
+- `election_timeout_lower_bound_ms` — if a follower didn't receive heartbeats from the leader in this interval, it can initiate a leader election (default: 1000)
+- `election_timeout_upper_bound_ms` — if a follower didn't receive heartbeats from the leader in this interval, it must initiate a leader election (default: 2000)
+- `rotate_log_storage_interval` — how many log records to store in a single file (default: 100000)
+- `reserved_log_items` — how many coordination log records to store before compaction (default: 100000)
+- `snapshot_distance` — how often clickhouse-keeper creates new snapshots (in the number of records in logs) (default: 100000)
+- `snapshots_to_keep` — how many snapshots to keep (default: 3)
+- `stale_log_gap` — the threshold at which the leader considers a follower stale and sends it a snapshot instead of logs (default: 10000)
+- `fresh_log_gap` — the threshold below which a node is considered fresh again (default: 200)
+- `max_requests_batch_size` — max size of a batch, in request count, before it is sent to RAFT (default: 100)
+- `force_sync` — call `fsync` on each write to the coordination log (default: true)
+- `quorum_reads` — execute read requests as writes through the whole RAFT consensus, with similar speed (default: false)
+- `raft_logs_level` — text logging level for coordination (trace, debug, and so on) (default: system default)
+- `auto_forwarding` — allow forwarding write requests from followers to the leader (default: true)
+- `shutdown_timeout` — wait to finish internal connections and shutdown (ms) (default: 5000)
+- `startup_timeout` — if the server doesn't connect to other quorum participants within the specified timeout, it will terminate (ms) (default: 30000)

 Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers. The only parameter for the whole quorum is `secure`, which enables an encrypted connection for communication between quorum participants. The main parameters for each `<server>` are:
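A hedged configuration sketch tying the two sections together (element names follow the lists above; the hostname, port, and values are illustrative only):

```xml
<keeper_server>
    <coordination_settings>
        <operation_timeout_ms>10000</operation_timeout_ms>
        <session_timeout_ms>30000</session_timeout_ms>
        <raft_logs_level>trace</raft_logs_level>
    </coordination_settings>
    <raft_configuration>
        <server>
            <id>1</id>
            <hostname>keeper1</hostname>
            <port>9444</port>
        </server>
    </raft_configuration>
</keeper_server>
```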
@@ -34,6 +34,7 @@ Configuration template:
     <min_part_size>...</min_part_size>
     <min_part_size_ratio>...</min_part_size_ratio>
     <method>...</method>
+    <level>...</level>
 </case>
 ...
 </compression>

@@ -43,7 +44,8 @@ Configuration template:

 - `min_part_size` – The minimum size of a data part.
 - `min_part_size_ratio` – The ratio of the data part size to the table size.
-- `method` – Compression method. Acceptable values: `lz4` or `zstd`.
+- `method` – Compression method. Acceptable values: `lz4`, `lz4hc`, `zstd`.
+- `level` – Compression level. See [Codecs](../../sql-reference/statements/create/table/#create-query-general-purpose-codecs).

 You can configure multiple `<case>` sections.

@@ -62,6 +64,7 @@ If no conditions met for a data part, ClickHouse uses the `lz4` compression.
     <min_part_size>10000000000</min_part_size>
     <min_part_size_ratio>0.01</min_part_size_ratio>
     <method>zstd</method>
+    <level>1</level>
 </case>
 </compression>
 ```

@@ -713,7 +716,7 @@ Keys for server/client settings:

 - extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`.
 - requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`.
 - requireTLSv1_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`.
-- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
+- requireTLSv1_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
 - fips – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS.
 - privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: `<privateKeyPassphraseHandler>`, `<name>KeyFileHandler</name>`, `<options><password>test</password></options>`, `</privateKeyPassphraseHandler>`.
 - invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: `<invalidCertificateHandler> <name>ConsoleCertificateHandler</name> </invalidCertificateHandler>` .
@@ -509,6 +509,23 @@ Possible values:

 Default value: `ALL`.

+## join_algorithm {#settings-join_algorithm}
+
+Specifies the [JOIN](../../sql-reference/statements/select/join.md) algorithm.
+
+Possible values:
+
+- `hash` — the [hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used.
+- `partial_merge` — the [sort-merge algorithm](https://en.wikipedia.org/wiki/Sort-merge_join) is used.
+- `prefer_partial_merge` — ClickHouse always tries to use `merge` join if possible.
+- `auto` — ClickHouse tries to change `hash` join to `merge` join on the fly to avoid running out of memory.
+
+Default value: `hash`.
+
+When the `hash` algorithm is used, the right part of the `JOIN` is uploaded into RAM.
+
+When the `partial_merge` algorithm is used, ClickHouse sorts the data and dumps it to disk. The `merge` algorithm in ClickHouse differs a bit from the classic realization. First, ClickHouse sorts the right table by the [join key](../../sql-reference/statements/select/join.md#select-join) in blocks and creates a min-max index for the sorted blocks. Then it sorts parts of the left table by the join key and joins them over the right table. The min-max index is also used to skip unneeded right-table blocks.
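For illustration, switching the algorithm is a one-line session setting (the table and column names below are hypothetical):

```sql
SET join_algorithm = 'auto';

SELECT l.key, l.value, r.value
FROM big_left AS l
INNER JOIN big_right AS r ON l.key = r.key;
```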
 ## join_any_take_last_row {#settings-join_any_take_last_row}

 Changes behaviour of join operations with `ANY` strictness.

@@ -2007,13 +2024,13 @@ Default value: 16.

 ## merge_selecting_sleep_ms {#merge_selecting_sleep_ms}

-Sleep time for merge selecting when no part selected, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters
+Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in `background_schedule_pool` frequently, which results in a large number of requests to ZooKeeper in large-scale clusters.

 Possible values:

 - Any positive integer.

-Default value: 5000
+Default value: `5000`.

 ## parallel_distributed_insert_select {#parallel_distributed_insert_select}
@@ -9,8 +9,8 @@ toc_title: Map(key, value)

 **Parameters**

-- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
-- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md).
+- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
+- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [Array](../../sql-reference/data-types/array.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).

 To get the value from an `a Map('key', 'value')` column, use the `a['key']` syntax. This lookup currently works with linear complexity.
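A quick hedged example of one of the newly allowed key types (`table_map` is a throwaway name; in the releases contemporary with this change, `SET allow_experimental_map_type = 1` may be required first):

```sql
CREATE TABLE table_map (a Map(LowCardinality(String), UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES (map('key1', 1, 'key2', 10));
SELECT a['key2'] FROM table_map;   -- returns 10
```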
@@ -598,7 +598,7 @@ SOURCE(CLICKHOUSE(
     table 'ids'
     where 'id=10'
+    secure 1
-))
+));
 ```

 Setting fields:
@@ -87,7 +87,7 @@ SELECT
     dictGetOrDefault('ext-dict-test', 'c1', number + 1, toUInt32(number * 10)) AS val,
     toTypeName(val) AS type
 FROM system.numbers
-LIMIT 3
+LIMIT 3;
 ```

 ``` text
@@ -211,7 +211,7 @@ SELECT nullIf(1, 2);

 ## assumeNotNull {#assumenotnull}

-Results in a value of type [Nullable](../../sql-reference/data-types/nullable.md) for a non- `Nullable`, if the value is not `NULL`.
+Results in an equivalent non-`Nullable` value for a [Nullable](../../sql-reference/data-types/nullable.md) type. If the original value is `NULL`, the result is undetermined. See also the `ifNull` and `coalesce` functions.

 ``` sql
 assumeNotNull(x)
 ```
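A hedged one-liner showing the intent (`t_null` and `y` are hypothetical names):

```sql
SELECT assumeNotNull(y) FROM t_null WHERE y IS NOT NULL;
```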
@@ -189,7 +189,7 @@ CREATE TABLE codec_example
     dt Date CODEC(ZSTD),
     ts DateTime CODEC(LZ4HC),
     float_value Float32 CODEC(NONE),
-    double_value Float64 CODEC(LZ4HC(9))
+    double_value Float64 CODEC(LZ4HC(9)),
+    value Float32 CODEC(Delta, ZSTD)
 )
 ENGINE = <Engine>
@@ -13,7 +13,7 @@ To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md)

 ## Granting Privilege Syntax {#grant-privigele-syntax}

 ``` sql
-GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION]
+GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
 ```

 - `privilege` — Type of privilege.

@@ -21,17 +21,19 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.ta
 - `user` — ClickHouse user account.

 The `WITH GRANT OPTION` clause grants `user` or `role` the permission to execute the `GRANT` query. Users can grant privileges of the same or a narrower scope than they have.
+The `WITH REPLACE OPTION` clause replaces old privileges with the new privileges for the `user` or `role`; if it is not specified, the new privileges are appended to the old ones.

 ## Assigning Role Syntax {#assign-role-syntax}

 ``` sql
-GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
+GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION] [WITH REPLACE OPTION]
 ```

 - `role` — ClickHouse user role.
 - `user` — ClickHouse user account.

 The `WITH ADMIN OPTION` clause grants the [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`.
+The `WITH REPLACE OPTION` clause replaces old roles with the new role for the `user` or `role`; if it is not specified, the new roles are appended to the old ones.
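To make the new clause concrete, a hedged example (user `john` and database `db` are placeholders): the second statement replaces the earlier grant instead of extending it.

```sql
GRANT SELECT, INSERT ON db.* TO john;
GRANT SELECT ON db.* TO john WITH REPLACE OPTION;
-- john is now left with SELECT only.
```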
 ## Usage {#grant-usage}
@@ -36,14 +36,23 @@ Additional join types available in ClickHouse:

 - `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
 - `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

-## Setting {#join-settings}
+## Settings {#join-settings}

 !!! note "Note"
     The default join type can be overridden using the [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.

-Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
+The behavior of the ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.

-### ASOF JOIN Usage {#asof-join-usage}
+**See also**
+
+- [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm)
+- [join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
+- [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls)
+- [partial_merge_join_optimizations](../../../operations/settings/settings.md#partial_merge_join_optimizations)
+- [partial_merge_join_rows_in_right_blocks](../../../operations/settings/settings.md#partial_merge_join_rows_in_right_blocks)
+- [join_on_disk_max_files_to_merge](../../../operations/settings/settings.md#join_on_disk_max_files_to_merge)
+- [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys)
+
+## ASOF JOIN Usage {#asof-join-usage}

 `ASOF JOIN` is useful when you need to join records that have no exact match.
@@ -93,7 +102,7 @@ For example, consider the following tables:

 !!! note "Note"
     `ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.

-## Distributed Join {#global-join}
+## Distributed JOIN {#global-join}

 There are two ways to execute join involving distributed tables:
@@ -102,6 +111,42 @@ There are two ways to execute join involving distributed tables:

 Be careful when using `GLOBAL`. For more information, see the [Distributed subqueries](../../../sql-reference/operators/in.md#select-distributed-subqueries) section.

+## Implicit Type Conversion {#implicit-type-conversion}
+
+`INNER JOIN`, `LEFT JOIN`, `RIGHT JOIN`, and `FULL JOIN` queries support implicit type conversion for "join keys". However, the query cannot be executed if the join keys from the left and the right tables cannot be converted to a single type (for example, there is no data type that can hold all values from both `UInt64` and `Int64`, or `String` and `Int32`).
+
+**Example**
+
+Consider the table `t_1`:
+```text
+┌─a─┬─b─┬─toTypeName(a)─┬─toTypeName(b)─┐
+│ 1 │ 1 │ UInt16        │ UInt8         │
+│ 2 │ 2 │ UInt16        │ UInt8         │
+└───┴───┴───────────────┴───────────────┘
+```
+and the table `t_2`:
+```text
+┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
+│ -1 │    1 │ Int16         │ Nullable(Int64) │
+│  1 │   -1 │ Int16         │ Nullable(Int64) │
+│  1 │    1 │ Int16         │ Nullable(Int64) │
+└────┴──────┴───────────────┴─────────────────┘
+```
+
+The query
+```sql
+SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b);
+```
+returns the set:
+```text
+┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
+│  1 │    1 │ Int32         │ Nullable(Int64) │
+│  2 │    2 │ Int32         │ Nullable(Int64) │
+│ -1 │    1 │ Int32         │ Nullable(Int64) │
+│  1 │   -1 │ Int32         │ Nullable(Int64) │
+└────┴──────┴───────────────┴─────────────────┘
+```
+
 ## Usage Recommendations {#usage-recommendations}

 ### Processing of Empty or NULL Cells {#processing-of-empty-or-null-cells}

@@ -139,9 +184,9 @@ If you need a `JOIN` for joining with dimension tables (these are relatively sma

 ### Memory Limitations {#memory-limitations}

-By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the `<right_table>` and creates a hash table for it in RAM. After some threshold of memory consumption, ClickHouse falls back to merge join algorithm.
+By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the right_table and creates a hash table for it in RAM. If `join_algorithm = 'auto'` is enabled, then after some threshold of memory consumption ClickHouse falls back to the [merge](https://en.wikipedia.org/wiki/Sort-merge_join) join algorithm. For a description of the `JOIN` algorithms, see the [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) setting.

-If you need to restrict join operation memory consumption use the following settings:
+If you need to restrict `JOIN` operation memory consumption, use the following settings:

 - [max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join) — Limits the number of rows in the hash table.
 - [max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join) — Limits the size of the hash table.
@@ -3,6 +3,16 @@ toc_priority: 76
 toc_title: Security Changelog
 ---

+## Fixed in ClickHouse 21.4.3.21, 2021-04-12 {#fixed-in-clickhouse-release-21-4-3-21-2021-04-12}
+
+### CVE-2021-25263 {#cve-2021-25263}
+
+An attacker that has the CREATE DICTIONARY privilege can read an arbitrary file outside the permitted directory.
+
+The fix has been pushed to versions 20.8.18.32-lts, 21.1.9.41-stable, 21.2.9.41-stable, 21.3.6.55-lts, 21.4.3.21-stable and later.
+
+Credits: [Vyacheslav Egoshin](https://twitter.com/vegoshin)
+
 ## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}

 ### CVE-2019-15024 {#cve-2019-15024}
@@ -464,7 +464,7 @@ SSL support is provided by the `libpoco` library:

 - extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`.
 - requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`.
 - requireTLSv1_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`.
-- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
+- requireTLSv1_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
 - fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS.
 - privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: `<privateKeyPassphraseHandler>`, `<name>KeyFileHandler</name>`, `<options><password>test</password></options>`, `</privateKeyPassphraseHandler>`.
 - invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: `<invalidCertificateHandler> <name>ConsoleCertificateHandler</name> </invalidCertificateHandler>` .
@@ -15,7 +15,7 @@ toc_title: GRANT

 ## Granting Privilege Syntax {#grant-privigele-syntax}

 ``` sql
-GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION]
+GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
 ```

 - `privilege` — Type of privilege.

@@ -23,17 +23,19 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.ta
 - `user` — ClickHouse user account.

 The `WITH GRANT OPTION` clause grants `user` or `role` permission to execute the `GRANT` query. Users can grant privileges of the same or a narrower scope than they have.
+The `WITH REPLACE OPTION` clause replaces old privileges with the new privileges for the `user` or `role`; if it is not specified, the new privileges are appended to the old ones.

 ## Assigning Role Syntax {#assign-role-syntax}

 ``` sql
-GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
+GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION] [WITH REPLACE OPTION]
 ```

 - `role` — ClickHouse user role.
 - `user` — ClickHouse user account.

 The `WITH ADMIN OPTION` clause grants the [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`.
+The `WITH REPLACE OPTION` clause replaces old roles with the new role for the `user` or `role`; if it is not specified, the new roles are appended to the old ones.

 ## Usage {#grant-usage}
@@ -128,7 +128,7 @@ Ninja is the build-task runner.

     /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
     brew install cmake ninja

-Check the version of CMake: `cmake --version`. If it is below 3.3, install a newer version from https://cmake.org/download/
+Check the version of CMake: `cmake --version`. If it is below 3.12, install a newer version from https://cmake.org/download/

 ## Optional External Libraries {#neobiazatelnye-vneshnie-biblioteki}
@@ -1,3 +1,4 @@
+
 ---
 toc_priority: 29
 toc_title: MaterializeMySQL

@@ -82,6 +83,8 @@ DDL queries in MySQL are converted into the corresponding ClickHouse DDL queries

 - If the `_sign` column is not specified explicitly in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not included in the result set.

+- The result includes column comments if they exist in the MySQL database tables.
+
 ### Index Conversion {#index-conversion}

 The `PRIMARY KEY` and `INDEX` clauses in MySQL are converted into `ORDER BY` tuples in ClickHouse tables.
@@ -34,6 +34,7 @@ ClickHouse reloads built-in dictionaries with a …
     <min_part_size>...</min_part_size>
     <min_part_size_ratio>...</min_part_size_ratio>
     <method>...</method>
+    <level>...</level>
 </case>
 ...
 </compression>

@@ -43,7 +44,8 @@ ClickHouse reloads built-in dictionaries with a …

 - `min_part_size` – The minimum size of a table part.
 - `min_part_size_ratio` – The ratio of the minimum part size to the full table size.
-- `method` – Compression method. Possible values: `lz4`, `zstd`.
+- `method` – Compression method. Possible values: `lz4`, `lz4hc`, `zstd`.
+- `level` – Compression level. See [Codecs](../../sql-reference/statements/create/table/#create-query-common-purpose-codecs).

 Multiple `<case>` sections can be configured.

@@ -62,6 +64,7 @@ ClickHouse checks the `min_part_size` and `min_part…` conditions
     <min_part_size>10000000000</min_part_size>
     <min_part_size_ratio>0.01</min_part_size_ratio>
     <method>zstd</method>
+    <level>1</level>
 </case>
 </compression>
 ```
@@ -490,6 +490,23 @@ ClickHouse can parse only the basic `Y…` format

 Default value: `ALL`.

+## join_algorithm {#settings-join_algorithm}
+
+Defines the algorithm used to execute a [JOIN](../../sql-reference/statements/select/join.md) query.
+
+Possible values:
+
+- `hash` — the [hash join algorithm](https://ru.wikipedia.org/wiki/Алгоритм_соединения_хешированием) is used.
+- `partial_merge` — the [sort-merge join algorithm](https://ru.wikipedia.org/wiki/Алгоритм_соединения_слиянием_сортированных_списков) is used.
+- `prefer_partial_merge` — the sort-merge join algorithm is used whenever possible.
+- `auto` — the ClickHouse server tries to change the `hash` algorithm to `merge` on the fly to avoid running out of memory.
+
+Default value: `hash`.
+
+When the `hash` algorithm is used, the right part of the `JOIN` is loaded into RAM.
+
+When the `partial_merge` algorithm is used, the server sorts the data and dumps it to disk. The `merge` algorithm in ClickHouse differs a bit from the classic implementation. First, ClickHouse sorts the right table in blocks by the [join keys](../../sql-reference/statements/select/join.md#select-join) and builds min-max indexes for the sorted blocks. Then it sorts chunks of the left table by the join keys and joins them against the right table. The min-max indexes are used to skip the blocks of the right table that do not take part in the `JOIN`.

 ## join_any_take_last_row {#settings-join_any_take_last_row}

 Changes the behaviour of operations executed with `ANY` strictness.

@@ -1821,7 +1838,7 @@ ClickHouse raises an exception

 Type: unsigned int

-ossible values: 32 (32 bytes) - 1073741824 (1 GiB)
+Possible values: 32 (32 bytes) - 1073741824 (1 GiB)

 Default value: 32768 (32 KiB)

@@ -1835,6 +1852,16 @@ ClickHouse raises an exception

 Default value: 16.

+## merge_selecting_sleep_ms {#merge_selecting_sleep_ms}
+
+Sleep time for merge selecting when no part is selected. A lower value triggers selecting tasks in the `background_schedule_pool` frequently, which leads to a large number of requests to ZooKeeper in large-scale clusters.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: `5000`.
+
 ## parallel_distributed_insert_select {#parallel_distributed_insert_select}

 Enables parallel distributed `INSERT ... SELECT` processing.
@@ -9,8 +9,8 @@ toc_title: Map(key, value)

 **Parameters**

-- `key` — The key. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
-- `value` — The value. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md).
+- `key` — The key. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
+- `value` — The value. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [Array](../../sql-reference/data-types/array.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).

 To get the value from an `a Map('key', 'value')` column, use the `a['key']` syntax. This lookup currently works with linear complexity.
@@ -581,6 +581,7 @@ SOURCE(MYSQL(
     <db>default</db>
     <table>ids</table>
     <where>id=10</where>
+    <secure>1</secure>
 </clickhouse>
 </source>
 ```

@@ -596,7 +597,8 @@ SOURCE(CLICKHOUSE(
     db 'default'
     table 'ids'
     where 'id=10'
+    secure 1
-))
+));
 ```

 Setting fields:

@@ -609,6 +611,7 @@ SOURCE(CLICKHOUSE(
 - `table` — the table name.
 - `where` — the selection condition. May be omitted.
 - `invalidate_query` — a query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external-dicts-dict-lifetime.md).
+- `secure` — a flag that enables or disables a secure SSL connection.

 ### MongoDB {#dicts-external_dicts_dict_sources-mongodb}

@@ -769,4 +772,3 @@ Setting fields:
 - `table` – the table name.
 - `where` – the selection condition. The syntax for conditions is the same as for a `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional parameter.
 - `invalidate_query` – a query for checking the dictionary load condition. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
-
@ -23,8 +23,8 @@ dictGetOrNull('dict_name', attr_name, id_expr)
|
||||
**Аргументы**
|
||||
|
||||
- `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal).
|
||||
- `attr_names` — имя столбца словаря, [Строковый литерал](../syntax.md#syntax-string-literal), или кортеж [Tuple](../../sql-reference/data-types/tuple.md) таких имен.
|
||||
- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md), в зависимости от конфигурации словаря.
|
||||
- `attr_names` — имя столбца словаря. [Строковый литерал](../syntax.md#syntax-string-literal), или кортеж [Tuple](../../sql-reference/data-types/tuple.md) таких имен.
|
||||
- `id_expr` — значение ключа словаря. [Expression](../../sql-reference/syntax.md#syntax-expressions) возвращает пару "ключ-значение" словаря или [Tuple](../../sql-reference/functions/ext-dict-functions.md), в зависимости от конфигурации словаря.
|
||||
- `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions), возвращающее значение с типом данных, сконфигурированным для атрибута `attr_names`, или кортеж [Tuple](../../sql-reference/data-types/tuple.md) таких выражений.
|
||||
|
||||
**Возвращаемое значение**
|
||||
@ -87,7 +87,7 @@ SELECT
|
||||
dictGetOrDefault('ext-dict-test', 'c1', number + 1, toUInt32(number * 10)) AS val,
|
||||
toTypeName(val) AS type
|
||||
FROM system.numbers
|
||||
LIMIT 3
|
||||
LIMIT 3;
|
||||
```
|
||||
|
||||
``` text
|
||||
@ -237,7 +237,7 @@ dictHas('dict_name', id)
|
||||
**Аргументы**
|
||||
|
||||
- `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal).
|
||||
- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md) в зависимости от конфигурации словаря.
|
||||
- `id_expr` — значение ключа словаря. [Expression](../../sql-reference/syntax.md#syntax-expressions) возвращает пару "ключ-значение" словаря или [Tuple](../../sql-reference/functions/ext-dict-functions.md) в зависимости от конфигурации словаря.
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
|
@ -13,7 +13,7 @@ toc_title: GRANT
|
||||
## Синтаксис присвоения привилегий {#grant-privigele-syntax}
|
||||
|
||||
```sql
|
||||
GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION]
|
||||
GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
|
||||
```
|
||||
|
||||
- `privilege` — Тип привилегии
|
||||
@ -21,18 +21,20 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.ta
|
||||
- `user` — Пользователь ClickHouse.
|
||||
|
||||
`WITH GRANT OPTION` разрешает пользователю или роли выполнять запрос `GRANT`. Пользователь может выдавать только те привилегии, которые есть у него, той же или меньшей области действий.
|
||||
`WITH REPLACE OPTION` заменяет все старые привилегии новыми привилегиями для `user` или `role`, Если не указано, добавьте новые привилегии для старых.
|
||||
|
||||
|
||||
## Синтаксис назначения ролей {#assign-role-syntax}
|
||||
|
||||
```sql
|
||||
GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
|
||||
GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION] [WITH REPLACE OPTION]
|
||||
```
|
||||
|
||||
- `role` — Роль пользователя ClickHouse.
|
||||
- `user` — Пользователь ClickHouse.
|
||||
|
||||
`WITH ADMIN OPTION` присваивает привилегию [ADMIN OPTION](#admin-option-privilege) пользователю или роли.
|
||||
`WITH REPLACE OPTION` заменяет все старые роли новыми ролями для пользователя `user` или `role`, Если не указано, добавьте новые роли в старые.
|
||||
|
||||
## Использование {#grant-usage}
|
||||
|
||||
@ -481,4 +483,3 @@ GRANT INSERT(x,y) ON db.table TO john
|
||||
### ADMIN OPTION {#admin-option-privilege}
|
||||
|
||||
Привилегия `ADMIN OPTION` разрешает пользователю назначать свои роли другому пользователю.
|
||||
|
||||
|
@ -4,7 +4,7 @@ toc_title: JOIN
|
||||
|
||||
# Секция JOIN {#select-join}
|
||||
|
||||
Join создаёт новую таблицу путем объединения столбцов из одной или нескольких таблиц с использованием общих для каждой из них значений. Это обычная операция в базах данных с поддержкой SQL, которая соответствует join из [реляционной алгебры](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators). Частный случай соединения одной таблицы часто называют «self-join».
|
||||
`JOIN` создаёт новую таблицу путем объединения столбцов из одной или нескольких таблиц с использованием общих для каждой из них значений. Это обычная операция в базах данных с поддержкой SQL, которая соответствует join из [реляционной алгебры](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators). Частный случай соединения одной таблицы часто называют self-join.
|
||||
|
||||
Синтаксис:
|
||||
|
||||
@ -38,12 +38,21 @@ FROM <left_table>
|
||||
|
||||
## Настройки {#join-settings}
|
||||
|
||||
!!! note "Примечание"
|
||||
Значение строгости по умолчанию может быть переопределено с помощью настройки [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
|
||||
|
||||
Поведение сервера ClickHouse для операций `ANY JOIN` зависит от параметра [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys).
|
||||
|
||||
### Использование ASOF JOIN {#asof-join-usage}
|
||||
**См. также**
|
||||
|
||||
- [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm)
|
||||
- [join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
|
||||
- [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls)
|
||||
- [partial_merge_join_optimizations](../../../operations/settings/settings.md#partial_merge_join_optimizations)
|
||||
- [partial_merge_join_rows_in_right_blocks](../../../operations/settings/settings.md#partial_merge_join_rows_in_right_blocks)
|
||||
- [join_on_disk_max_files_to_merge](../../../operations/settings/settings.md#join_on_disk_max_files_to_merge)
|
||||
- [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys)
|
||||
|
||||
## Использование ASOF JOIN {#asof-join-usage}
|
||||
|
||||
`ASOF JOIN` применим в том случае, когда необходимо объединять записи, которые не имеют точного совпадения.
|
||||
|
||||
@ -95,7 +104,7 @@ USING (equi_column1, ... equi_columnN, asof_column)
|
||||
|
||||
Чтобы задать значение строгости по умолчанию, используйте сессионный параметр [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
|
||||
|
||||
#### Распределённый join {#global-join}
|
||||
## Распределённый JOIN {#global-join}
|
||||
|
||||
Есть два пути для выполнения соединения с участием распределённых таблиц:
|
||||
|
||||
@ -104,6 +113,42 @@ USING (equi_column1, ... equi_columnN, asof_column)
|
||||
|
||||
Будьте аккуратны при использовании `GLOBAL`. За дополнительной информацией обращайтесь в раздел [Распределенные подзапросы](../../../sql-reference/operators/in.md#select-distributed-subqueries).
|
||||
|
||||
## Неявные преобразования типов {#implicit-type-conversion}
|
||||
|
||||
Запросы `INNER JOIN`, `LEFT JOIN`, `RIGHT JOIN` и `FULL JOIN` поддерживают неявные преобразования типов для ключей соединения. Однако запрос не может быть выполнен, если не существует типа, к которому можно привести значения ключей с обеих сторон (например, нет типа, который бы одновременно вмещал в себя значения `UInt64` и `Int64`, или `String` и `Int32`).
|
||||
|
||||
**Пример**
|
||||
|
||||
Рассмотрим таблицу `t_1`:
|
||||
```text
|
||||
┌─a─┬─b─┬─toTypeName(a)─┬─toTypeName(b)─┐
|
||||
│ 1 │ 1 │ UInt16 │ UInt8 │
|
||||
│ 2 │ 2 │ UInt16 │ UInt8 │
|
||||
└───┴───┴───────────────┴───────────────┘
|
||||
```
|
||||
и таблицу `t_2`:
|
||||
```text
|
||||
┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
|
||||
│ -1 │ 1 │ Int16 │ Nullable(Int64) │
|
||||
│ 1 │ -1 │ Int16 │ Nullable(Int64) │
|
||||
│ 1 │ 1 │ Int16 │ Nullable(Int64) │
|
||||
└────┴──────┴───────────────┴─────────────────┘
|
||||
```
|
||||
|
||||
Запрос
|
||||
```sql
|
||||
SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b);
|
||||
```
|
||||
вернёт результат:
|
||||
```text
|
||||
┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
|
||||
│ 1 │ 1 │ Int32 │ Nullable(Int64) │
|
||||
│ 2 │ 2 │ Int32 │ Nullable(Int64) │
|
||||
│ -1 │ 1 │ Int32 │ Nullable(Int64) │
|
||||
│ 1 │ -1 │ Int32 │ Nullable(Int64) │
|
||||
└────┴──────┴───────────────┴─────────────────┘
|
||||
```
|
||||
|
## Usage Recommendations {#usage-recommendations}

### Processing of Empty or NULL Cells {#processing-of-empty-or-null-cells}

@ -142,12 +187,14 @@ USING (equi_column1, ... equi_columnN, asof_column)

### Memory Limitations {#memory-limitations}

By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the `<right_table>` and creates a hash table for it in RAM. After some threshold of memory consumption, ClickHouse falls back to the merge join algorithm.
By default, ClickHouse uses the [hash join](https://ru.wikipedia.org/wiki/Алгоритм_соединения_хешированием) algorithm. ClickHouse takes the right table and creates a hash table for it in RAM. With the setting `join_algorithm = 'auto'` enabled, ClickHouse falls back to the [merge join](https://ru.wikipedia.org/wiki/Алгоритм_соединения_слиянием_сортированных_списков) algorithm after a certain threshold of memory consumption. For a description of the `JOIN` algorithms, see the [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) setting.

- [max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join) — limits the number of rows in the hash table.
- [max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join) — limits the size of the hash table.
If you want to restrict memory consumption during a `JOIN` operation, use the following settings:

When any of these limits is reached, ClickHouse acts according to the [join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode) setting.
- [max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join) — limits the number of rows in the hash table.
- [max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join) — limits the size of the hash table.

When any of these limits is reached, ClickHouse acts according to the [join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode) setting.
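As an illustrative sketch (the values are arbitrary examples, not recommendations), these limits can be set per session:

```sql
SET join_algorithm = 'auto';        -- allow falling back from hash join to merge join
SET max_rows_in_join = 1000000;     -- cap the number of rows in the hash table
SET max_bytes_in_join = 1000000000; -- cap the size of the hash table in bytes
SET join_overflow_mode = 'break';   -- stop and return a partial result instead of throwing
```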
## Examples {#examples}
@ -5,6 +5,17 @@ toc_title: Security Changelog

# Security Changelog {#security-changelog}

## Fixed in ClickHouse Release 21.4.3.21, 2021-04-12 {#fixed-in-clickhouse-release-21-4-3-21-2019-09-10}

### CVE-2021-25263 {#cve-2021-25263}

An attacker with access to creating dictionaries can read files on the file system of the ClickHouse server.
An attacker can bypass an incorrect check of the dictionary file path and load part of any file as a dictionary. By manipulating the file parsing options, the attacker can then obtain the next part of the file and read the whole file step by step.

The fix is available in versions 20.8.18.32-lts, 21.1.9.41-stable, 21.2.9.41-stable, 21.3.6.55-lts, 21.4.3.21-stable and later.

Credits: [Vyacheslav Egoshin](https://twitter.com/vegoshin)

## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#ispravleno-v-relize-19-14-3-3-2019-09-10}

### CVE-2019-15024 {#cve-2019-15024}
@ -8,9 +8,9 @@ toc_title: "\u6982\u8FF0"

# ClickHouse Guides {#clickhouse-guides}

Detailed step-by-step instructions that help you solve various tasks using ClickHouse:
A list of detailed instructions on how to solve various tasks using ClickHouse:

- [Tutorial on a simple cluster setup](../getting-started/tutorial.md)
- [A tutorial on setting up a simple cluster](../getting-started/tutorial.md)
- [Applying a CatBoost model in ClickHouse](apply-catboost-model.md)

[Original article](https://clickhouse.tech/docs/en/guides/) <!--hide-->
@ -462,7 +462,7 @@ SSL client/server configuration.

- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`.
- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`.
- requireTLSv1_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`.
- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
- requireTLSv1_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
- fips – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS.
- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: `<privateKeyPassphraseHandler>`, `<name>KeyFileHandler</name>`, `<options><password>test</password></options>`, `</privateKeyPassphraseHandler>`.
- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: `<invalidCertificateHandler> <name>ConsoleCertificateHandler</name> </invalidCertificateHandler>`.
@ -12,7 +12,7 @@ toc_title: GRANT

## Grant Syntax {#grant-privigele-syntax}

``` sql
GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION]
GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
```

- `privilege` — the type of privilege

@ -20,17 +20,19 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.ta

- `user` — the user account

`WITH GRANT OPTION` grants the `user` or `role` permission to execute `GRANT`. A user can grant only those privileges that lie within the scope of their own privileges.
`WITH REPLACE OPTION` replaces the old privileges of the `user` or `role` with the new privileges from the current SQL statement; without this option, the new privileges are appended.
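A sketch with a hypothetical user `john`: the second statement, because of `WITH REPLACE OPTION`, replaces the earlier grant rather than adding to it:

```sql
GRANT SELECT ON db1.* TO john;
GRANT INSERT ON db1.* TO john WITH REPLACE OPTION;
-- john now has only INSERT on db1.*; the earlier SELECT grant is replaced.
```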
## Role Assignment Syntax {#assign-role-syntax}

``` sql
GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION] [WITH REPLACE OPTION]
```

- `role` — the role
- `user` — the user

`WITH ADMIN OPTION` grants the `user` or `role` permission to use [ADMIN OPTION](#admin-option-privilege).
`WITH REPLACE OPTION` replaces the old roles of the `user` or `role` with the new roles from the current SQL statement; without this option, the new roles are appended.
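Similarly — the role and user names below are hypothetical — a role grant with `WITH REPLACE OPTION` swaps out previously assigned roles instead of accumulating them:

```sql
GRANT reader_role TO mira;
GRANT writer_role TO mira WITH REPLACE OPTION;
-- mira now has only writer_role; reader_role was replaced, not appended.
```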
## Usage {#grant-usage}
@ -271,7 +271,8 @@ private:

if (max_time > 0 && total_watch.elapsedSeconds() >= max_time)
{
std::cout << "Stopping launch of queries. Requested time limit is exhausted.\n";
std::cout << "Stopping launch of queries."
<< " Requested time limit " << max_time << " seconds is exhausted.\n";
return false;
}

@ -368,8 +369,7 @@ private:
{
extracted = queue.tryPop(query, 100);

if (shutdown
|| (max_iterations && queries_executed == max_iterations))
if (shutdown || (max_iterations && queries_executed == max_iterations))
{
return;
}

@ -382,8 +382,9 @@ private:
}
catch (...)
{
std::cerr << "An error occurred while processing the query '"
<< query << "'.\n";
std::lock_guard lock(mutex);
std::cerr << "An error occurred while processing the query " << "'" << query << "'"
<< ": " << getCurrentExceptionMessage(false) << std::endl;
if (!continue_on_errors)
{
shutdown = true;

@ -26,6 +26,9 @@
#include <boost/algorithm/string/replace.hpp>
#include <Poco/String.h>
#include <Poco/Util/Application.h>
#include <Processors/Formats/IInputFormat.h>
#include <Processors/Executors/PullingAsyncPipelineExecutor.h>
#include <Processors/QueryPipeline.h>
#include <Columns/ColumnString.h>
#include <common/find_symbols.h>
#include <common/LineReader.h>

@ -55,8 +58,7 @@
#include <IO/Operators.h>
#include <IO/UseSSL.h>
#include <IO/WriteBufferFromOStream.h>
#include <DataStreams/AsynchronousBlockInputStream.h>
#include <DataStreams/AddingDefaultsBlockInputStream.h>
#include <Processors/Transforms/AddingDefaultsTransform.h>
#include <DataStreams/InternalTextLogsRowOutputStream.h>
#include <DataStreams/NullBlockOutputStream.h>
#include <Parsers/ASTCreateQuery.h>

@ -80,6 +82,7 @@
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Formats/registerFormats.h>
#include <Formats/FormatFactory.h>
#include <Common/Config/configReadClient.h>
#include <Storages/ColumnsDescription.h>
#include <common/argsToConfig.h>

@ -422,6 +425,7 @@ private:
{TokenType::Semicolon, Replxx::Color::INTENSE},
{TokenType::Dot, Replxx::Color::INTENSE},
{TokenType::Asterisk, Replxx::Color::INTENSE},
{TokenType::HereDoc, Replxx::Color::CYAN},
{TokenType::Plus, Replxx::Color::INTENSE},
{TokenType::Minus, Replxx::Color::INTENSE},
{TokenType::Slash, Replxx::Color::INTENSE},

@ -447,8 +451,7 @@ private:
{TokenType::ErrorDoubleQuoteIsNotClosed, Replxx::Color::RED},
{TokenType::ErrorSinglePipeMark, Replxx::Color::RED},
{TokenType::ErrorWrongNumber, Replxx::Color::RED},
{ TokenType::ErrorMaxQuerySizeExceeded,
Replxx::Color::RED }};
{TokenType::ErrorMaxQuerySizeExceeded, Replxx::Color::RED }};

const Replxx::Color unknown_token_color = Replxx::Color::RED;

@ -606,9 +609,9 @@ private:
std::cout << "Warnings:" << std::endl;
for (const auto & message : messages)
std::cout << "* " << message << std::endl;
}
std::cout << std::endl;
}
}
catch (...)
{
/// Ignore exception

@ -1925,19 +1928,24 @@ private:
current_format = insert->format;
}

BlockInputStreamPtr block_input = context->getInputFormat(current_format, buf, sample, insert_format_max_block_size);
auto source = FormatFactory::instance().getInput(current_format, buf, sample, context, insert_format_max_block_size);
Pipe pipe(source);

if (columns_description.hasDefaults())
block_input = std::make_shared<AddingDefaultsBlockInputStream>(block_input, columns_description, context);

BlockInputStreamPtr async_block_input = std::make_shared<AsynchronousBlockInputStream>(block_input);

async_block_input->readPrefix();

while (true)
{
Block block = async_block_input->read();
pipe.addSimpleTransform([&](const Block & header)
{
return std::make_shared<AddingDefaultsTransform>(header, columns_description, *source, context);
});
}

QueryPipeline pipeline;
pipeline.init(std::move(pipe));
PullingAsyncPipelineExecutor executor(pipeline);

Block block;
while (executor.pull(block))
{
/// Check if server send Log packet
receiveLogs();

@ -1949,18 +1957,18 @@ private:
* We're exiting with error, so it makes sense to kill the
* input stream without waiting for it to complete.
*/
async_block_input->cancel(true);
executor.cancel();
return;
}

if (block)
{
connection->sendData(block);
processed_rows += block.rows();

if (!block)
break;
}
}

async_block_input->readSuffix();
connection->sendData({});
}
@ -1702,14 +1702,15 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
LOG_INFO(log, "All helping tables dropped partition {}", partition_name);
}

String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings & settings)
String ClusterCopier::getRemoteCreateTable(
const DatabaseAndTableName & table, Connection & connection, const Settings & settings)
{
auto remote_context = Context::createCopy(context);
remote_context->setSettings(settings);

String query = "SHOW CREATE TABLE " + getQuotedTable(table);
Block block = getBlockWithAllStreamData(std::make_shared<RemoteBlockInputStream>(
connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context));
Block block = getBlockWithAllStreamData(
std::make_shared<RemoteBlockInputStream>(connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context));

return typeid_cast<const ColumnString &>(*block.safeGetByPosition(0).column).getDataAt(0).toString();
}

@ -1719,10 +1720,8 @@ ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & time
{
/// Fetch and parse (possibly) new definition
auto connection_entry = task_shard.info.pool->get(timeouts, &task_cluster->settings_pull, true);
String create_query_pull_str = getRemoteCreateTable(
task_shard.task_table.table_pull,
*connection_entry,
task_cluster->settings_pull);
String create_query_pull_str
= getRemoteCreateTable(task_shard.task_table.table_pull, *connection_entry, task_cluster->settings_pull);

ParserCreateQuery parser_create_query;
const auto & settings = getContext()->getSettingsRef();

@ -433,7 +433,7 @@ void LocalServer::processQueries()

try
{
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, finalize_progress);
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, {}, finalize_progress);
}
catch (...)
{
@ -15,8 +15,8 @@
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeUUID.h>
#include <Interpreters/Context.h>
#include <DataStreams/IBlockOutputStream.h>
#include <DataStreams/LimitBlockInputStream.h>
#include <Processors/Pipe.h>
#include <Processors/LimitTransform.h>
#include <Common/SipHash.h>
#include <Common/UTF8Helpers.h>
#include <Common/StringUtils/StringUtils.h>

@ -24,6 +24,10 @@
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Formats/registerFormats.h>
#include <Formats/FormatFactory.h>
#include <Processors/Formats/IInputFormat.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Executors/PullingPipelineExecutor.h>
#include <Core/Block.h>
#include <common/StringRef.h>
#include <common/DateLUT.h>

@ -1156,17 +1160,20 @@ try
if (!silent)
std::cerr << "Training models\n";

BlockInputStreamPtr input = context->getInputFormat(input_format, file_in, header, max_block_size);
Pipe pipe(FormatFactory::instance().getInput(input_format, file_in, header, context, max_block_size));

input->readPrefix();
while (Block block = input->read())
QueryPipeline pipeline;
pipeline.init(std::move(pipe));
PullingPipelineExecutor executor(pipeline);

Block block;
while (executor.pull(block))
{
obfuscator.train(block.getColumns());
source_rows += block.rows();
if (!silent)
std::cerr << "Processed " << source_rows << " rows\n";
}
input->readSuffix();
}

obfuscator.finalize();

@ -1183,15 +1190,26 @@ try

file_in.seek(0, SEEK_SET);

BlockInputStreamPtr input = context->getInputFormat(input_format, file_in, header, max_block_size);
BlockOutputStreamPtr output = context->getOutputStreamParallelIfPossible(output_format, file_out, header);
Pipe pipe(FormatFactory::instance().getInput(input_format, file_in, header, context, max_block_size));

if (processed_rows + source_rows > limit)
input = std::make_shared<LimitBlockInputStream>(input, limit - processed_rows, 0);
{
pipe.addSimpleTransform([&](const Block & cur_header)
{
return std::make_shared<LimitTransform>(cur_header, limit - processed_rows, 0);
});
}

QueryPipeline pipeline;
pipeline.init(std::move(pipe));

BlockOutputStreamPtr output = context->getOutputStreamParallelIfPossible(output_format, file_out, header);

PullingPipelineExecutor executor(pipeline);

input->readPrefix();
output->writePrefix();
while (Block block = input->read())
Block block;
while (executor.pull(block))
{
Columns columns = obfuscator.generate(block.getColumns());
output->write(header.cloneWithColumns(columns));

@ -1200,7 +1218,6 @@ try
std::cerr << "Processed " << processed_rows << " rows\n";
}
output->writeSuffix();
input->readSuffix();

obfuscator.updateSeed();
}
@ -59,6 +59,7 @@
#include <TableFunctions/registerTableFunctions.h>
#include <Formats/registerFormats.h>
#include <Storages/registerStorages.h>
#include <DataStreams/ConnectionCollector.h>
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Common/Config/ConfigReloader.h>

@ -503,6 +504,8 @@ if (ThreadFuzzer::instance().isEffective())
// ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well.
GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 10000));

ConnectionCollector::init(global_context, config().getUInt("max_threads_for_connection_collector", 10));

bool has_zookeeper = config().has("zookeeper");

zkutil::ZooKeeperNodeCache main_config_zk_node_cache([&] { return global_context->getZooKeeper(); });
@ -489,11 +489,12 @@ std::shared_ptr<const EnabledSettings> AccessControlManager::getEnabledSettings(
return settings_profiles_cache->getEnabledSettings(user_id, settings_from_user, enabled_roles, settings_from_enabled_roles);
}

std::shared_ptr<const SettingsChanges> AccessControlManager::getProfileSettings(const String & profile_name) const
std::shared_ptr<const SettingsProfilesInfo> AccessControlManager::getSettingsProfileInfo(const UUID & profile_id)
{
return settings_profiles_cache->getProfileSettings(profile_name);
return settings_profiles_cache->getSettingsProfileInfo(profile_id);
}

const ExternalAuthenticators & AccessControlManager::getExternalAuthenticators() const
{
return *external_authenticators;

@ -32,8 +32,7 @@ class RowPolicyCache;
class EnabledQuota;
class QuotaCache;
struct QuotaUsage;
struct SettingsProfile;
using SettingsProfilePtr = std::shared_ptr<const SettingsProfile>;
struct SettingsProfilesInfo;
class EnabledSettings;
class SettingsProfilesCache;
class SettingsProfileElements;

@ -145,7 +144,7 @@ public:
const boost::container::flat_set<UUID> & enabled_roles,
const SettingsProfileElements & settings_from_enabled_roles) const;

std::shared_ptr<const SettingsChanges> getProfileSettings(const String & profile_name) const;
std::shared_ptr<const SettingsProfilesInfo> getSettingsProfileInfo(const UUID & profile_id);

const ExternalAuthenticators & getExternalAuthenticators() const;
@ -7,6 +7,7 @@
#include <Access/User.h>
#include <Access/EnabledRolesInfo.h>
#include <Access/EnabledSettings.h>
#include <Access/SettingsProfilesInfo.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Common/Exception.h>
#include <Common/quoteString.h>

@ -307,23 +308,25 @@ std::shared_ptr<const ContextAccess> ContextAccess::getFullAccess()
}

std::shared_ptr<const Settings> ContextAccess::getDefaultSettings() const
SettingsChanges ContextAccess::getDefaultSettings() const
{
std::lock_guard lock{mutex};
if (enabled_settings)
return enabled_settings->getSettings();
static const auto everything_by_default = std::make_shared<Settings>();
return everything_by_default;
{
if (auto info = enabled_settings->getInfo())
return info->settings;
}
return {};
}

std::shared_ptr<const SettingsConstraints> ContextAccess::getSettingsConstraints() const
std::shared_ptr<const SettingsProfilesInfo> ContextAccess::getDefaultProfileInfo() const
{
std::lock_guard lock{mutex};
if (enabled_settings)
return enabled_settings->getConstraints();
static const auto no_constraints = std::make_shared<SettingsConstraints>();
return no_constraints;
return enabled_settings->getInfo();
static const auto everything_by_default = std::make_shared<SettingsProfilesInfo>(*manager);
return everything_by_default;
}
@ -23,7 +23,8 @@ class EnabledQuota;
class EnabledSettings;
struct QuotaUsage;
struct Settings;
class SettingsConstraints;
struct SettingsProfilesInfo;
class SettingsChanges;
class AccessControlManager;
class IAST;
using ASTPtr = std::shared_ptr<IAST>;

@ -84,11 +85,9 @@ public:
std::shared_ptr<const EnabledQuota> getQuota() const;
std::optional<QuotaUsage> getQuotaUsage() const;

/// Returns the default settings, i.e. the settings to apply on user's login.
std::shared_ptr<const Settings> getDefaultSettings() const;

/// Returns the settings' constraints.
std::shared_ptr<const SettingsConstraints> getSettingsConstraints() const;
/// Returns the default settings, i.e. the settings which should be applied on user's login.
SettingsChanges getDefaultSettings() const;
std::shared_ptr<const SettingsProfilesInfo> getDefaultProfileInfo() const;

/// Returns the current access rights.
std::shared_ptr<const AccessRights> getAccessRights() const;
@ -11,27 +11,16 @@ EnabledSettings::EnabledSettings(const Params & params_) : params(params_)

EnabledSettings::~EnabledSettings() = default;

std::shared_ptr<const Settings> EnabledSettings::getSettings() const
std::shared_ptr<const SettingsProfilesInfo> EnabledSettings::getInfo() const
{
std::lock_guard lock{mutex};
return settings;
return info;
}

std::shared_ptr<const SettingsConstraints> EnabledSettings::getConstraints() const
void EnabledSettings::setInfo(const std::shared_ptr<const SettingsProfilesInfo> & info_)
{
std::lock_guard lock{mutex};
return constraints;
}

void EnabledSettings::setSettingsAndConstraints(
const std::shared_ptr<const Settings> & settings_, const std::shared_ptr<const SettingsConstraints> & constraints_)
{
std::lock_guard lock{mutex};
settings = settings_;
constraints = constraints_;
info = info_;
}

}
@ -1,15 +1,15 @@
#pragma once

#include <common/types.h>
#include <Core/UUID.h>
#include <Access/SettingsConstraints.h>
#include <Access/SettingsProfileElement.h>
#include <Core/UUID.h>
#include <boost/container/flat_set.hpp>
#include <mutex>

namespace DB
{
struct SettingsProfilesInfo;

/// Watches settings profiles for a specific user and roles.
class EnabledSettings
{

@ -30,27 +30,19 @@ public:
friend bool operator >=(const Params & lhs, const Params & rhs) { return !(lhs < rhs); }
};

~EnabledSettings();

/// Returns the default settings come from settings profiles defined for the user
/// and the roles passed in the constructor.
std::shared_ptr<const Settings> getSettings() const;
std::shared_ptr<const SettingsProfilesInfo> getInfo() const;

/// Returns the constraints come from settings profiles defined for the user
/// and the roles passed in the constructor.
std::shared_ptr<const SettingsConstraints> getConstraints() const;
~EnabledSettings();

private:
friend class SettingsProfilesCache;
EnabledSettings(const Params & params_);

void setSettingsAndConstraints(
const std::shared_ptr<const Settings> & settings_, const std::shared_ptr<const SettingsConstraints> & constraints_);
void setInfo(const std::shared_ptr<const SettingsProfilesInfo> & info_);

const Params params;
SettingsProfileElements settings_from_enabled;
std::shared_ptr<const Settings> settings;
std::shared_ptr<const SettingsConstraints> constraints;
std::shared_ptr<const SettingsProfilesInfo> info;
mutable std::mutex mutex;
};
}
@ -3,7 +3,6 @@
#include <boost/range/algorithm/set_algorithm.hpp>
#include <boost/range/algorithm_ext/erase.hpp>

namespace DB
{
void GrantedRoles::grant(const UUID & role_)

@ -80,7 +79,7 @@ std::vector<UUID> GrantedRoles::findGranted(const boost::container::flat_set<UUI
{
std::vector<UUID> res;
res.reserve(ids.size());
boost::range::set_difference(ids, roles, std::back_inserter(res));
boost::range::set_intersection(ids, roles, std::back_inserter(res));
return res;
}

@ -111,7 +110,7 @@ std::vector<UUID> GrantedRoles::findGrantedWithAdminOption(const boost::containe
{
std::vector<UUID> res;
res.reserve(ids.size());
boost::range::set_difference(ids, roles_with_admin_option, std::back_inserter(res));
boost::range::set_intersection(ids, roles_with_admin_option, std::back_inserter(res));
return res;
}
@ -197,6 +197,16 @@ String IAccessStorage::readName(const UUID & id) const
}

Strings IAccessStorage::readNames(const std::vector<UUID> & ids) const
{
Strings res;
res.reserve(ids.size());
for (const auto & id : ids)
res.emplace_back(readName(id));
return res;
}

std::optional<String> IAccessStorage::tryReadName(const UUID & id) const
{
String name;

@ -207,6 +217,19 @@ std::optional<String> IAccessStorage::tryReadName(const UUID & id) const
}

Strings IAccessStorage::tryReadNames(const std::vector<UUID> & ids) const
{
Strings res;
res.reserve(ids.size());
for (const auto & id : ids)
{
if (auto name = tryReadName(id))
res.emplace_back(std::move(name).value());
}
return res;
}

UUID IAccessStorage::insert(const AccessEntityPtr & entity)
{
return insertImpl(entity, false);
@ -84,7 +84,9 @@ public:

/// Reads only name of an entity.
String readName(const UUID & id) const;
Strings readNames(const std::vector<UUID> & ids) const;
std::optional<String> tryReadName(const UUID & id) const;
Strings tryReadNames(const std::vector<UUID> & ids) const;

/// Returns true if a specified entity can be inserted into this storage.
/// This function doesn't check whether there are no entities with such name in the storage.
@ -18,8 +18,6 @@ namespace ErrorCodes
}

SettingsConstraints::SettingsConstraints() = default;

SettingsConstraints::SettingsConstraints(const AccessControlManager & manager_) : manager(&manager_)
{
}

@ -201,13 +199,10 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh
}
};

if (manager)
{
if (reaction == THROW_ON_VIOLATION)
manager->checkSettingNameIsAllowed(setting_name);
else if (!manager->isSettingNameAllowed(setting_name))
return false;
}

Field current_value, new_value;
if (current_settings.tryGet(setting_name, current_value))

@ -51,7 +51,6 @@ class AccessControlManager;
class SettingsConstraints
{
public:
SettingsConstraints();
SettingsConstraints(const AccessControlManager & manager_);
SettingsConstraints(const SettingsConstraints & src);
SettingsConstraints & operator =(const SettingsConstraints & src);
21
src/Access/SettingsConstraintsAndProfileIDs.h
Normal file

@ -0,0 +1,21 @@
#pragma once

#include <Access/SettingsConstraints.h>
#include <Core/UUID.h>
#include <vector>

namespace DB
{

/// Information about currently applied constraints and profiles.
struct SettingsConstraintsAndProfileIDs
{
SettingsConstraints constraints;
std::vector<UUID> current_profiles;
std::vector<UUID> enabled_profiles;

SettingsConstraintsAndProfileIDs(const AccessControlManager & manager_) : constraints(manager_) {}
};

}
@ -7,6 +7,7 @@
#include <Common/SettingsChanges.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <common/removeDuplicates.h>

namespace DB

@ -172,4 +173,21 @@ SettingsConstraints SettingsProfileElements::toSettingsConstraints(const AccessC
return res;
}

std::vector<UUID> SettingsProfileElements::toProfileIDs() const
{
std::vector<UUID> res;
for (const auto & elem : *this)
{
if (elem.parent_profile)
res.push_back(*elem.parent_profile);
}

/// If some profile occurs multiple times (with some other settings in between),
/// the latest occurrence overrides all the previous ones.
removeDuplicatesKeepLast(res);

return res;
}

}

@ -62,6 +62,7 @@ public:
Settings toSettings() const;
SettingsChanges toSettingsChanges() const;
SettingsConstraints toSettingsConstraints(const AccessControlManager & manager) const;
std::vector<UUID> toProfileIDs() const;
};

}
@ -1,11 +1,8 @@
#include <Access/SettingsProfilesCache.h>
#include <Access/AccessControlManager.h>
#include <Access/SettingsProfile.h>
#include <Core/Settings.h>
#include <Common/SettingsChanges.h>
#include <Access/SettingsProfilesInfo.h>
#include <Common/quoteString.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>

namespace DB

@ -15,7 +12,6 @@ namespace ErrorCodes
extern const int THERE_IS_NO_PROFILE;
}

SettingsProfilesCache::SettingsProfilesCache(const AccessControlManager & manager_)
: manager(manager_) {}

@ -67,7 +63,7 @@ void SettingsProfilesCache::profileAddedOrChanged(const UUID & profile_id, const
profiles_by_name.erase(old_profile->getName());
profiles_by_name[new_profile->getName()] = profile_id;
}
settings_for_profiles.clear();
profile_infos_cache.clear();
mergeSettingsAndConstraints();
}

@ -80,7 +76,7 @@ void SettingsProfilesCache::profileRemoved(const UUID & profile_id)
return;
profiles_by_name.erase(it->second->getName());
all_profiles.erase(it);
settings_for_profiles.clear();
profile_infos_cache.clear();
mergeSettingsAndConstraints();
}

@ -142,49 +138,52 @@ void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & ena
merged_settings.merge(enabled.params.settings_from_enabled_roles);
merged_settings.merge(enabled.params.settings_from_user);

substituteProfiles(merged_settings);
auto info = std::make_shared<SettingsProfilesInfo>(manager);
info->profiles = enabled.params.settings_from_user.toProfileIDs();
substituteProfiles(merged_settings, info->profiles_with_implicit, info->names_of_profiles);
info->settings = merged_settings.toSettingsChanges();
info->constraints = merged_settings.toSettingsConstraints(manager);

auto settings = merged_settings.toSettings();
auto constraints = merged_settings.toSettingsConstraints(manager);
enabled.setSettingsAndConstraints(
std::make_shared<Settings>(std::move(settings)), std::make_shared<SettingsConstraints>(std::move(constraints)));
enabled.setInfo(std::move(info));
}

void SettingsProfilesCache::substituteProfiles(SettingsProfileElements & elements) const
void SettingsProfilesCache::substituteProfiles(
SettingsProfileElements & elements,
std::vector<UUID> & substituted_profiles,
std::unordered_map<UUID, String> & names_of_substituted_profiles) const
{
boost::container::flat_set<UUID> already_substituted;
for (size_t i = 0; i != elements.size();)
/// We should substitute profiles in reversive order because the same profile can occur
/// in `elements` multiple times (with some other settings in between) and in this case
/// the last occurrence should override all the previous ones.
boost::container::flat_set<UUID> substituted_profiles_set;
size_t i = elements.size();
while (i != 0)
{
auto & element = elements[i];
auto & element = elements[--i];
if (!element.parent_profile)
{
++i;
continue;
}

auto parent_profile_id = *element.parent_profile;
auto profile_id = *element.parent_profile;
element.parent_profile.reset();
if (already_substituted.count(parent_profile_id))
{
++i;
if (substituted_profiles_set.count(profile_id))
continue;
}

already_substituted.insert(parent_profile_id);
auto parent_profile = all_profiles.find(parent_profile_id);
if (parent_profile == all_profiles.end())
{
++i;
auto profile_it = all_profiles.find(profile_id);
if (profile_it == all_profiles.end())
continue;
}

const auto & parent_profile_elements = parent_profile->second->elements;
elements.insert(elements.begin() + i, parent_profile_elements.begin(), parent_profile_elements.end());
const auto & profile = profile_it->second;
const auto & profile_elements = profile->elements;
elements.insert(elements.begin() + i, profile_elements.begin(), profile_elements.end());
i += profile_elements.size();
substituted_profiles.push_back(profile_id);
substituted_profiles_set.insert(profile_id);
names_of_substituted_profiles.emplace(profile_id, profile->getName());
}
std::reverse(substituted_profiles.begin(), substituted_profiles.end());
}

std::shared_ptr<const EnabledSettings> SettingsProfilesCache::getEnabledSettings(
const UUID & user_id,
const SettingsProfileElements & settings_from_user,

@ -216,26 +215,26 @@ std::shared_ptr<const EnabledSettings> SettingsProfilesCache::getEnabledSettings
}

std::shared_ptr<const SettingsChanges> SettingsProfilesCache::getProfileSettings(const String & profile_name)
std::shared_ptr<const SettingsProfilesInfo> SettingsProfilesCache::getSettingsProfileInfo(const UUID & profile_id)
{
std::lock_guard lock{mutex};
ensureAllProfilesRead();

auto it = profiles_by_name.find(profile_name);
if (it == profiles_by_name.end())
throw Exception("Settings profile " + backQuote(profile_name) + " not found", ErrorCodes::THERE_IS_NO_PROFILE);
const UUID profile_id = it->second;

auto it2 = settings_for_profiles.find(profile_id);
if (it2 != settings_for_profiles.end())
return it2->second;
if (auto pos = this->profile_infos_cache.get(profile_id))
return *pos;

SettingsProfileElements elements = all_profiles[profile_id]->elements;
substituteProfiles(elements);
auto res = std::make_shared<const SettingsChanges>(elements.toSettingsChanges());
settings_for_profiles.emplace(profile_id, res);
return res;

auto info = std::make_shared<SettingsProfilesInfo>(manager);

info->profiles.push_back(profile_id);
info->profiles_with_implicit.push_back(profile_id);
substituteProfiles(elements, info->profiles_with_implicit, info->names_of_profiles);
info->settings = elements.toSettingsChanges();
info->constraints.merge(elements.toSettingsConstraints(manager));

profile_infos_cache.add(profile_id, info);
return info;
}

}
@ -1,8 +1,7 @@
#pragma once

#include <Access/EnabledSettings.h>
#include <Core/UUID.h>
#include <common/types.h>
#include <Poco/LRUCache.h>
#include <common/scope_guard.h>
#include <map>
#include <unordered_map>

@ -13,9 +12,7 @@ namespace DB
class AccessControlManager;
struct SettingsProfile;
using SettingsProfilePtr = std::shared_ptr<const SettingsProfile>;
class SettingsProfileElements;
class EnabledSettings;

struct SettingsProfilesInfo;

/// Reads and caches all the settings profiles.
class SettingsProfilesCache

@ -32,7 +29,7 @@ public:
const boost::container::flat_set<UUID> & enabled_roles,
const SettingsProfileElements & settings_from_enabled_roles_);

std::shared_ptr<const SettingsChanges> getProfileSettings(const String & profile_name);
std::shared_ptr<const SettingsProfilesInfo> getSettingsProfileInfo(const UUID & profile_id);

private:
void ensureAllProfilesRead();

@ -40,7 +37,7 @@ private:
void profileRemoved(const UUID & profile_id);
void mergeSettingsAndConstraints();
void mergeSettingsAndConstraintsFor(EnabledSettings & enabled) const;
void substituteProfiles(SettingsProfileElements & elements) const;
void substituteProfiles(SettingsProfileElements & elements, std::vector<UUID> & substituted_profiles, std::unordered_map<UUID, String> & names_of_substituted_profiles) const;

const AccessControlManager & manager;
std::unordered_map<UUID, SettingsProfilePtr> all_profiles;

@ -49,7 +46,7 @@ private:
scope_guard subscription;
std::map<EnabledSettings::Params, std::weak_ptr<EnabledSettings>> enabled_settings;
std::optional<UUID> default_profile_id;
std::unordered_map<UUID, std::shared_ptr<const SettingsChanges>> settings_for_profiles;
Poco::LRUCache<UUID, std::shared_ptr<const SettingsProfilesInfo>> profile_infos_cache;
mutable std::mutex mutex;
};
}
52
src/Access/SettingsProfilesInfo.cpp
Normal file

@ -0,0 +1,52 @@
#include <Access/SettingsProfilesInfo.h>
#include <Access/SettingsConstraintsAndProfileIDs.h>
#include <common/removeDuplicates.h>

namespace DB
{

bool operator==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs)
{
if (lhs.settings != rhs.settings)
return false;

if (lhs.constraints != rhs.constraints)
return false;

if (lhs.profiles != rhs.profiles)
return false;

if (lhs.profiles_with_implicit != rhs.profiles_with_implicit)
return false;

if (lhs.names_of_profiles != rhs.names_of_profiles)
return false;

return true;
}

std::shared_ptr<const SettingsConstraintsAndProfileIDs>
SettingsProfilesInfo::getConstraintsAndProfileIDs(const std::shared_ptr<const SettingsConstraintsAndProfileIDs> & previous) const
{
auto res = std::make_shared<SettingsConstraintsAndProfileIDs>(manager);
res->current_profiles = profiles;

res->constraints = previous ? previous->constraints : constraints;

if (previous)
{
res->enabled_profiles.reserve(previous->enabled_profiles.size() + profiles_with_implicit.size());
res->enabled_profiles = previous->enabled_profiles;
}
res->enabled_profiles.insert(res->enabled_profiles.end(), profiles_with_implicit.begin(), profiles_with_implicit.end());

/// If some profile occurs multiple times (with some other settings in between),
/// the latest occurrence overrides all the previous ones.
removeDuplicatesKeepLast(res->current_profiles);
removeDuplicatesKeepLast(res->enabled_profiles);

return res;
}

}
43
src/Access/SettingsProfilesInfo.h
Normal file

@ -0,0 +1,43 @@
#pragma once

#include <Access/SettingsConstraints.h>
#include <Common/SettingsChanges.h>
#include <Core/UUID.h>
#include <unordered_map>

namespace DB
{
struct SettingsConstraintsAndProfileIDs;

/// Information about the default settings which are applied to an user on login.
struct SettingsProfilesInfo
{
SettingsChanges settings;
SettingsConstraints constraints;

/// Profiles explicitly assigned to the user.
std::vector<UUID> profiles;

/// Profiles assigned to the user both explicitly and implicitly.
/// Implicitly assigned profiles include parent profiles of other assigned profiles,
/// profiles assigned via granted roles, profiles assigned via their own settings,
/// and the main default profile (see the section `default_profile` in the main configuration file).
/// The order of IDs in this vector corresponds the order of applying of these profiles.
std::vector<UUID> profiles_with_implicit;

/// Names of all the profiles in `profiles`.
std::unordered_map<UUID, String> names_of_profiles;

SettingsProfilesInfo(const AccessControlManager & manager_) : constraints(manager_), manager(manager_) {}
std::shared_ptr<const SettingsConstraintsAndProfileIDs> getConstraintsAndProfileIDs(
const std::shared_ptr<const SettingsConstraintsAndProfileIDs> & previous = nullptr) const;

friend bool operator ==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs);
friend bool operator !=(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs) { return !(lhs == rhs); }

private:
const AccessControlManager & manager;
};

}
@ -45,6 +45,7 @@ SRCS(
SettingsProfilesCache.cpp
User.cpp
UsersConfigAccessStorage.cpp
tests/gtest_access_rights_ops.cpp
)
@ -95,18 +95,18 @@ AggregateFunctionPtr AggregateFunctionFactory::get(
// nullability themselves. Another special case is functions from Nothing
// that are rewritten to AggregateFunctionNothing, in this case
// nested_function is nullptr.
if (nested_function && nested_function->isOnlyWindowFunction())
if (!nested_function || !nested_function->isOnlyWindowFunction())
{
return nested_function;
return combinator->transformAggregateFunction(nested_function,
out_properties, type_without_low_cardinality, parameters);
}
}

return combinator->transformAggregateFunction(nested_function, out_properties, type_without_low_cardinality, parameters);
}

auto with_original_arguments = getImpl(name, type_without_low_cardinality, parameters, out_properties, false);

auto res = getImpl(name, type_without_low_cardinality, parameters, out_properties, false);
if (!res)
if (!with_original_arguments)
throw Exception("Logical error: AggregateFunctionFactory returned nullptr", ErrorCodes::LOGICAL_ERROR);
return res;
return with_original_arguments;
}
@ -60,7 +60,7 @@ public:
{
}

String getName() const override { return Data::name(); }
String getName() const override { return Policy::name; }

DataTypePtr getReturnType() const override { return std::make_shared<DataTypeNumber<T>>(); }

@ -120,6 +120,7 @@ template <typename Data>
class BitmapAndPolicy
{
public:
static constexpr auto name = "groupBitmapAnd";
static void apply(Data & lhs, const Data & rhs) { lhs.rbs.rb_and(rhs.rbs); }
};

@ -127,6 +128,7 @@ template <typename Data>
class BitmapOrPolicy
{
public:
static constexpr auto name = "groupBitmapOr";
static void apply(Data & lhs, const Data & rhs) { lhs.rbs.rb_or(rhs.rbs); }
};

@ -134,6 +136,7 @@ template <typename Data>
class BitmapXorPolicy
{
public:
static constexpr auto name = "groupBitmapXor";
static void apply(Data & lhs, const Data & rhs) { lhs.rbs.rb_xor(rhs.rbs); }
};
@ -3,6 +3,7 @@
#include <AggregateFunctions/AggregateFunctionSequenceMatch.h>

#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>

#include <common/range.h>

@ -459,6 +459,8 @@ public:
explicit FieldVisitorMax(const Field & rhs_) : rhs(rhs_) {}

bool operator() (Null &) const { throw Exception("Cannot compare Nulls", ErrorCodes::LOGICAL_ERROR); }
bool operator() (NegativeInfinity &) const { throw Exception("Cannot compare -Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (PositiveInfinity &) const { throw Exception("Cannot compare +Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (AggregateFunctionStateData &) const { throw Exception("Cannot compare AggregateFunctionStates", ErrorCodes::LOGICAL_ERROR); }

bool operator() (Array & x) const { return compareImpl<Array>(x); }

@ -494,6 +496,8 @@ public:
explicit FieldVisitorMin(const Field & rhs_) : rhs(rhs_) {}

bool operator() (Null &) const { throw Exception("Cannot compare Nulls", ErrorCodes::LOGICAL_ERROR); }
bool operator() (NegativeInfinity &) const { throw Exception("Cannot compare -Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (PositiveInfinity &) const { throw Exception("Cannot compare +Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (AggregateFunctionStateData &) const { throw Exception("Cannot sum AggregateFunctionStates", ErrorCodes::LOGICAL_ERROR); }

bool operator() (Array & x) const { return compareImpl<Array>(x); }
@ -4,6 +4,7 @@
#include <AggregateFunctions/FactoryHelpers.h>

#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeUUID.h>

@ -49,6 +50,8 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const
return res;
else if (which.isDate())
return std::make_shared<AggregateFunctionUniq<DataTypeDate::FieldType, Data>>(argument_types);
else if (which.isDate32())
return std::make_shared<AggregateFunctionUniq<DataTypeDate32::FieldType, Data>>(argument_types);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionUniq<DataTypeDateTime::FieldType, Data>>(argument_types);
else if (which.isStringOrFixedString())

@ -95,6 +98,8 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const
return res;
else if (which.isDate())
return std::make_shared<AggregateFunctionUniq<DataTypeDate::FieldType, Data<DataTypeDate::FieldType>>>(argument_types);
else if (which.isDate32())
return std::make_shared<AggregateFunctionUniq<DataTypeDate32::FieldType, Data<DataTypeDate32::FieldType>>>(argument_types);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionUniq<DataTypeDateTime::FieldType, Data<DataTypeDateTime::FieldType>>>(argument_types);
else if (which.isStringOrFixedString())

@ -6,6 +6,7 @@
#include <Common/FieldVisitorConvertToNumber.h>

#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>

#include <functional>

@ -51,6 +52,8 @@ namespace
return res;
else if (which.isDate())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate::FieldType>>(argument_types, params);
else if (which.isDate32())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate32::FieldType>>(argument_types, params);
else if (which.isDateTime())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDateTime::FieldType>>(argument_types, params);
else if (which.isStringOrFixedString())

@ -3,6 +3,7 @@
#include <AggregateFunctions/AggregateFunctionUniqUpTo.h>
#include <Common/FieldVisitorConvertToNumber.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeFixedString.h>

@ -61,6 +62,8 @@ AggregateFunctionPtr createAggregateFunctionUniqUpTo(const std::string & name, c
return res;
else if (which.isDate())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDate::FieldType>>(threshold, argument_types, params);
else if (which.isDate32())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDate32::FieldType>>(threshold, argument_types, params);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDateTime::FieldType>>(threshold, argument_types, params);
else if (which.isStringOrFixedString())

@ -4,6 +4,7 @@
#include <AggregateFunctions/Helpers.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>

#include <common/range.h>
@ -28,6 +28,8 @@ HedgedConnections::HedgedConnections(
std::shared_ptr<QualifiedTableName> table_to_check_)
: hedged_connections_factory(pool_, &settings_, timeouts_, table_to_check_)
, settings(settings_)
, drain_timeout(settings.drain_timeout)
, allow_changing_replica_until_first_data_packet(settings.allow_changing_replica_until_first_data_packet)
, throttler(throttler_)
{
std::vector<Connection *> connections = hedged_connections_factory.getManyConnections(pool_mode);

@ -251,7 +253,7 @@ Packet HedgedConnections::drain()

while (!epoll.empty())
{
ReplicaLocation location = getReadyReplicaLocation();
ReplicaLocation location = getReadyReplicaLocation(DrainCallback{drain_timeout});
Packet packet = receivePacketFromReplica(location);
switch (packet.type)
{

@ -278,10 +280,10 @@ Packet HedgedConnections::drain()
Packet HedgedConnections::receivePacket()
{
std::lock_guard lock(cancel_mutex);
return receivePacketUnlocked({});
return receivePacketUnlocked({}, false /* is_draining */);
}

Packet HedgedConnections::receivePacketUnlocked(AsyncCallback async_callback)
Packet HedgedConnections::receivePacketUnlocked(AsyncCallback async_callback, bool /* is_draining */)
{
if (!sent_query)
throw Exception("Cannot receive packets: no query sent.", ErrorCodes::LOGICAL_ERROR);

@ -396,7 +398,7 @@ Packet HedgedConnections::receivePacketFromReplica(const ReplicaLocation & repli
{
/// If we are allowed to change replica until the first data packet,
/// just restart timeout (if it hasn't expired yet). Otherwise disable changing replica with this offset.
if (settings.allow_changing_replica_until_first_data_packet && !replica.is_change_replica_timeout_expired)
if (allow_changing_replica_until_first_data_packet && !replica.is_change_replica_timeout_expired)
replica.change_replica_timeout.setRelative(hedged_connections_factory.getConnectionTimeouts().receive_data_timeout);
else
disableChangingReplica(replica_location);

@ -97,7 +97,7 @@ public:

Packet receivePacket() override;

Packet receivePacketUnlocked(AsyncCallback async_callback) override;
Packet receivePacketUnlocked(AsyncCallback async_callback, bool is_draining) override;

void disconnect() override;

@ -189,6 +189,12 @@ private:

Epoll epoll;
const Settings & settings;

/// The following two fields are from settings but can be referenced outside the lifetime of
/// settings when connection is drained asynchronously.
Poco::Timespan drain_timeout;
bool allow_changing_replica_until_first_data_packet;

ThrottlerPtr throttler;
bool sent_query = false;
bool cancelled = false;
31
src/Client/IConnections.cpp
Normal file

@ -0,0 +1,31 @@
#include <Client/IConnections.h>
#include <Poco/Net/SocketImpl.h>

namespace DB
{

namespace ErrorCodes
{
extern const int SOCKET_TIMEOUT;
}

/// This wrapper struct allows us to use Poco's socket polling code with a raw fd.
/// The only difference from Poco::Net::SocketImpl is that we don't close the fd in the destructor.
struct PocoSocketWrapper : public Poco::Net::SocketImpl
{
explicit PocoSocketWrapper(int fd)
{
reset(fd);
}

// Do not close fd.
~PocoSocketWrapper() override { reset(-1); }
};

void IConnections::DrainCallback::operator()(int fd, Poco::Timespan, const std::string fd_description) const
{
if (!PocoSocketWrapper(fd).poll(drain_timeout, Poco::Net::Socket::SELECT_READ))
throw Exception(ErrorCodes::SOCKET_TIMEOUT, "Read timeout while draining from {}", fd_description);
}

}
@ -10,6 +10,12 @@ namespace DB
class IConnections : boost::noncopyable
{
public:
struct DrainCallback
{
Poco::Timespan drain_timeout;
void operator()(int fd, Poco::Timespan, const std::string fd_description = "") const;
};

/// Send all scalars to replicas.
virtual void sendScalarsData(Scalars & data) = 0;
/// Send all content of external tables to replicas.

@ -30,7 +36,7 @@ public:
virtual Packet receivePacket() = 0;

/// Version of `receivePacket` function without locking.
virtual Packet receivePacketUnlocked(AsyncCallback async_callback) = 0;
virtual Packet receivePacketUnlocked(AsyncCallback async_callback, bool is_draining) = 0;

/// Break all active connections.
virtual void disconnect() = 0;
@ -18,7 +18,7 @@ namespace ErrorCodes
|
||||
|
||||
|
||||
MultiplexedConnections::MultiplexedConnections(Connection & connection, const Settings & settings_, const ThrottlerPtr & throttler)
|
||||
: settings(settings_)
|
||||
: settings(settings_), drain_timeout(settings.drain_timeout), receive_timeout(settings.receive_timeout)
|
||||
{
|
||||
connection.setThrottler(throttler);
|
||||
|
||||
@ -30,9 +30,8 @@ MultiplexedConnections::MultiplexedConnections(Connection & connection, const Se
|
||||
}
|
||||
|
||||
MultiplexedConnections::MultiplexedConnections(
|
||||
std::vector<IConnectionPool::Entry> && connections,
|
||||
const Settings & settings_, const ThrottlerPtr & throttler)
|
||||
: settings(settings_)
|
||||
std::vector<IConnectionPool::Entry> && connections, const Settings & settings_, const ThrottlerPtr & throttler)
|
||||
: settings(settings_), drain_timeout(settings.drain_timeout), receive_timeout(settings.receive_timeout)
|
||||
{
|
||||
/// If we didn't get any connections from pool and getMany() did not throw exceptions, this means that
|
||||
/// `skip_unavailable_shards` was set. Then just return.
|
||||
@ -168,7 +167,7 @@ void MultiplexedConnections::sendReadTaskResponse(const String & response)
|
||||
Packet MultiplexedConnections::receivePacket()
|
||||
{
|
||||
std::lock_guard lock(cancel_mutex);
|
||||
Packet packet = receivePacketUnlocked({});
|
||||
Packet packet = receivePacketUnlocked({}, false /* is_draining */);
|
||||
return packet;
|
||||
}
|
||||
|
||||
@ -216,7 +215,7 @@ Packet MultiplexedConnections::drain()
 
     while (hasActiveConnections())
     {
-        Packet packet = receivePacketUnlocked({});
+        Packet packet = receivePacketUnlocked(DrainCallback{drain_timeout}, true /* is_draining */);
 
         switch (packet.type)
         {
@ -264,14 +263,14 @@ std::string MultiplexedConnections::dumpAddressesUnlocked() const
     return buf.str();
 }
 
-Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callback)
+Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callback, bool is_draining)
 {
     if (!sent_query)
         throw Exception("Cannot receive packets: no query sent.", ErrorCodes::LOGICAL_ERROR);
     if (!hasActiveConnections())
         throw Exception("No more packets are available.", ErrorCodes::LOGICAL_ERROR);
 
-    ReplicaState & state = getReplicaForReading();
+    ReplicaState & state = getReplicaForReading(is_draining);
     current_connection = state.connection;
     if (current_connection == nullptr)
         throw Exception("Logical error: no available replica", ErrorCodes::NO_AVAILABLE_REPLICA);
@ -323,9 +322,10 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac
     return packet;
 }
 
-MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForReading()
+MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForReading(bool is_draining)
 {
-    if (replica_states.size() == 1)
+    /// Fast path when we only focus on one replica and are not draining the connection.
+    if (replica_states.size() == 1 && !is_draining)
         return replica_states[0];
 
     Poco::Net::Socket::SocketList read_list;
@ -353,10 +353,26 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
             read_list.push_back(*connection->socket);
         }
 
-        int n = Poco::Net::Socket::select(read_list, write_list, except_list, settings.receive_timeout);
+        int n = Poco::Net::Socket::select(
+            read_list,
+            write_list,
+            except_list,
+            is_draining ? drain_timeout : receive_timeout);
+
         if (n == 0)
-            throw Exception("Timeout exceeded while reading from " + dumpAddressesUnlocked(), ErrorCodes::TIMEOUT_EXCEEDED);
+        {
+            auto err_msg = fmt::format("Timeout exceeded while reading from {}", dumpAddressesUnlocked());
+            for (ReplicaState & state : replica_states)
+            {
+                Connection * connection = state.connection;
+                if (connection != nullptr)
+                {
+                    connection->disconnect();
+                    invalidateReplica(state);
+                }
+            }
+            throw Exception(err_msg, ErrorCodes::TIMEOUT_EXCEEDED);
+        }
     }
 
     /// TODO Absolutely wrong code: read_list could be empty; motivation of rand is unclear.
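Both the normal read path and the drain path now reach the same select() call; only the timeout differs, and on timeout every replica is disconnected and invalidated so a half-drained connection is never reused. A self-contained sketch of the timeout decision (the reasoning is from the patch, the standalone function is illustrative):

    #include <Poco/Timespan.h>

    Poco::Timespan pickSelectTimeout(bool is_draining, Poco::Timespan drain_timeout, Poco::Timespan receive_timeout)
    {
        /// Draining should give up quickly and tear the replicas down;
        /// a normal read may legitimately wait much longer for data.
        return is_draining ? drain_timeout : receive_timeout;
    }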
@ -61,7 +61,7 @@ public:
     bool hasActiveConnections() const override { return active_connection_count > 0; }
 
 private:
-    Packet receivePacketUnlocked(AsyncCallback async_callback) override;
+    Packet receivePacketUnlocked(AsyncCallback async_callback, bool is_draining) override;
 
     /// Internal version of `dumpAddresses` function without locking.
     std::string dumpAddressesUnlocked() const;
@ -74,7 +74,7 @@ private:
     };
 
     /// Get a replica where you can read the data.
-    ReplicaState & getReplicaForReading();
+    ReplicaState & getReplicaForReading(bool is_draining);
 
     /// Mark the replica as invalid.
     void invalidateReplica(ReplicaState & replica_state);
@ -82,6 +82,11 @@ private:
 private:
     const Settings & settings;
 
+    /// The following two fields are copied from settings because they can still be
+    /// referenced after the lifetime of settings ends, when the connection is drained asynchronously.
+    Poco::Timespan drain_timeout;
+    Poco::Timespan receive_timeout;
+
     /// The current number of valid connections to the replicas of this shard.
     size_t active_connection_count = 0;
@ -546,97 +546,54 @@ namespace
 {
 
 /// The following function implements a slightly more general version
-/// of getExtremes() than the implementation from ColumnVector.
+/// of getExtremes() than the implementation from non-Nullable IColumns.
 /// It takes into account the possible presence of nullable values.
-template <typename T>
-void getExtremesFromNullableContent(const ColumnVector<T> & col, const NullMap & null_map, Field & min, Field & max)
+void getExtremesWithNulls(const IColumn & nested_column, const NullMap & null_array, Field & min, Field & max, bool null_last = false)
 {
-    const auto & data = col.getData();
-    size_t size = data.size();
-
-    if (size == 0)
-    {
-        min = Null();
-        max = Null();
-        return;
-    }
-
-    bool has_not_null = false;
-    bool has_not_nan = false;
-
-    T cur_min = 0;
-    T cur_max = 0;
-
-    for (size_t i = 0; i < size; ++i)
-    {
-        const T x = data[i];
-
-        if (null_map[i])
-            continue;
-
-        if (!has_not_null)
-        {
-            cur_min = x;
-            cur_max = x;
-            has_not_null = true;
-            has_not_nan = !isNaN(x);
-            continue;
-        }
-
-        if (isNaN(x))
-            continue;
-
-        if (!has_not_nan)
-        {
-            cur_min = x;
-            cur_max = x;
-            has_not_nan = true;
-            continue;
-        }
-
-        if (x < cur_min)
-            cur_min = x;
-        else if (x > cur_max)
-            cur_max = x;
-    }
-
-    if (has_not_null)
-    {
-        min = cur_min;
-        max = cur_max;
-    }
+    size_t number_of_nulls = 0;
+    size_t n = null_array.size();
+    NullMap not_null_array(n);
+    for (auto i = 0ul; i < n; ++i)
+    {
+        if (null_array[i])
+        {
+            ++number_of_nulls;
+            not_null_array[i] = 0;
+        }
+        else
+        {
+            not_null_array[i] = 1;
+        }
+    }
+
+    if (number_of_nulls == 0)
+    {
+        nested_column.getExtremes(min, max);
+    }
+    else if (number_of_nulls == n)
+    {
+        min = PositiveInfinity();
+        max = PositiveInfinity();
+    }
+    else
+    {
+        auto filtered_column = nested_column.filter(not_null_array, -1);
+        filtered_column->getExtremes(min, max);
+        if (null_last)
+            max = PositiveInfinity();
+    }
 }
 
 }
 
 
 void ColumnNullable::getExtremes(Field & min, Field & max) const
 {
-    min = Null();
-    max = Null();
-
-    const auto & null_map_data = getNullMapData();
-
-    if (const auto * col_i8 = typeid_cast<const ColumnInt8 *>(nested_column.get()))
-        getExtremesFromNullableContent<Int8>(*col_i8, null_map_data, min, max);
-    else if (const auto * col_i16 = typeid_cast<const ColumnInt16 *>(nested_column.get()))
-        getExtremesFromNullableContent<Int16>(*col_i16, null_map_data, min, max);
-    else if (const auto * col_i32 = typeid_cast<const ColumnInt32 *>(nested_column.get()))
-        getExtremesFromNullableContent<Int32>(*col_i32, null_map_data, min, max);
-    else if (const auto * col_i64 = typeid_cast<const ColumnInt64 *>(nested_column.get()))
-        getExtremesFromNullableContent<Int64>(*col_i64, null_map_data, min, max);
-    else if (const auto * col_u8 = typeid_cast<const ColumnUInt8 *>(nested_column.get()))
-        getExtremesFromNullableContent<UInt8>(*col_u8, null_map_data, min, max);
-    else if (const auto * col_u16 = typeid_cast<const ColumnUInt16 *>(nested_column.get()))
-        getExtremesFromNullableContent<UInt16>(*col_u16, null_map_data, min, max);
-    else if (const auto * col_u32 = typeid_cast<const ColumnUInt32 *>(nested_column.get()))
-        getExtremesFromNullableContent<UInt32>(*col_u32, null_map_data, min, max);
-    else if (const auto * col_u64 = typeid_cast<const ColumnUInt64 *>(nested_column.get()))
-        getExtremesFromNullableContent<UInt64>(*col_u64, null_map_data, min, max);
-    else if (const auto * col_f32 = typeid_cast<const ColumnFloat32 *>(nested_column.get()))
-        getExtremesFromNullableContent<Float32>(*col_f32, null_map_data, min, max);
-    else if (const auto * col_f64 = typeid_cast<const ColumnFloat64 *>(nested_column.get()))
-        getExtremesFromNullableContent<Float64>(*col_f64, null_map_data, min, max);
+    getExtremesWithNulls(getNestedColumn(), getNullMapData(), min, max);
+}
+
+void ColumnNullable::getExtremesNullLast(Field & min, Field & max) const
+{
+    getExtremesWithNulls(getNestedColumn(), getNullMapData(), min, max, true);
 }
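A hedged, test-style sketch of the new behaviour (the column contents are hypothetical; this snippet is not part of the patch). With some rows NULL, the non-NULL values are filtered out and their extremes taken; getExtremesNullLast additionally forces the max to +Inf so NULLs sort last in the minmax index:

    #include <Columns/ColumnNullable.h>
    #include <Columns/ColumnsNumber.h>
    #include <Core/Field.h>

    using namespace DB;

    void extremesSketch()
    {
        /// Nullable(Int32) column holding [NULL, 3, NULL, 7].
        auto nested = ColumnInt32::create();
        auto null_map = ColumnUInt8::create();
        for (Int32 v : {0, 3, 0, 7})
            nested->insertValue(v);
        for (UInt8 is_null : {1, 0, 1, 0})
            null_map->insertValue(is_null);
        auto column = ColumnNullable::create(std::move(nested), std::move(null_map));

        Field min;
        Field max;
        column->getExtremes(min, max);         /// min = 3, max = 7: NULL rows are filtered out
        column->getExtremesNullLast(min, max); /// min = 3, max = +Inf: NULLs forced to sort last
    }

If every row were NULL, both extremes become +Inf; with no NULLs at all, the nested column's own getExtremes runs untouched.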
@ -111,6 +111,8 @@ public:
     void updateWeakHash32(WeakHash32 & hash) const override;
     void updateHashFast(SipHash & hash) const override;
     void getExtremes(Field & min, Field & max) const override;
+    // Special function for nullable minmax index
+    void getExtremesNullLast(Field & min, Field & max) const;
 
     MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override
     {
@ -124,6 +124,10 @@ class FindResultImpl : public FindResultImplBase, public FindResultImplOffsetBas
     Mapped * value;
 
 public:
+    FindResultImpl()
+        : FindResultImplBase(false), FindResultImplOffsetBase<need_offset>(0)
+    {}
+
     FindResultImpl(Mapped * value_, bool found_, size_t off)
         : FindResultImplBase(found_), FindResultImplOffsetBase<need_offset>(off), value(value_) {}
     Mapped & getMapped() const { return *value; }
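The added default constructor makes FindResultImpl default-constructible, which is what lets callers materialize result slots before a hash-table lookup fills them in. A generic, hedged sketch of the pattern this enables (the helper below is illustrative, not from the patch):

    #include <cstddef>
    #include <vector>

    template <typename FindResult>
    std::vector<FindResult> makeResultBuffer(size_t rows)
    {
        /// Compiles only if FindResult is default-constructible,
        /// which the constructor added above now guarantees.
        return std::vector<FindResult>(rows);
    }

Note that a default-constructed result leaves `value` unset, so getMapped() must not be called on it until a real lookup result has been assigned.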
@ -71,6 +71,10 @@
     M(PartsInMemory, "In-memory parts.") \
     M(MMappedFiles, "Total number of mmapped files.") \
     M(MMappedFileBytes, "Sum size of mmapped file regions.") \
+    M(AsyncDrainedConnections, "Number of connections drained asynchronously.") \
+    M(ActiveAsyncDrainedConnections, "Number of active connections drained asynchronously.") \
+    M(SyncDrainedConnections, "Number of connections drained synchronously.") \
+    M(ActiveSyncDrainedConnections, "Number of active connections drained synchronously.") \
 
 namespace CurrentMetrics
 {
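For reference, a hedged sketch of how such metrics are typically driven from code; the draining function is hypothetical, while CurrentMetrics::add and the RAII CurrentMetrics::Increment are the usual entry points:

    #include <Common/CurrentMetrics.h>

    namespace CurrentMetrics
    {
        extern const Metric AsyncDrainedConnections;
        extern const Metric ActiveAsyncDrainedConnections;
    }

    void drainConnectionAsyncSketch()
    {
        /// Running total of connections ever handed to the async drainer.
        CurrentMetrics::add(CurrentMetrics::AsyncDrainedConnections);
        /// RAII gauge: incremented now, decremented automatically on scope exit.
        CurrentMetrics::Increment active{CurrentMetrics::ActiveAsyncDrainedConnections};
        /// ... drain the connection here ...
    }

The Active* metrics pair naturally with the RAII increment, while the plain counters record totals.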
@ -26,6 +26,16 @@ public:
         throw Exception("Cannot convert NULL to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
     }
 
+    T operator() (const NegativeInfinity &) const
+    {
+        throw Exception("Cannot convert -Inf to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
+    }
+
+    T operator() (const PositiveInfinity &) const
+    {
+        throw Exception("Cannot convert +Inf to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
+    }
+
     T operator() (const String &) const
     {
         throw Exception("Cannot convert String to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
@ -25,6 +25,8 @@ static inline void writeQuoted(const DecimalField<T> & x, WriteBuffer & buf)
 }
 
 String FieldVisitorDump::operator() (const Null &) const { return "NULL"; }
+String FieldVisitorDump::operator() (const NegativeInfinity &) const { return "-Inf"; }
+String FieldVisitorDump::operator() (const PositiveInfinity &) const { return "+Inf"; }
 String FieldVisitorDump::operator() (const UInt64 & x) const { return formatQuotedWithPrefix(x, "UInt64_"); }
 String FieldVisitorDump::operator() (const Int64 & x) const { return formatQuotedWithPrefix(x, "Int64_"); }
 String FieldVisitorDump::operator() (const Float64 & x) const { return formatQuotedWithPrefix(x, "Float64_"); }
@ -10,6 +10,8 @@ class FieldVisitorDump : public StaticVisitor<String>
 {
 public:
     String operator() (const Null & x) const;
+    String operator() (const NegativeInfinity & x) const;
+    String operator() (const PositiveInfinity & x) const;
     String operator() (const UInt64 & x) const;
     String operator() (const UInt128 & x) const;
     String operator() (const UInt256 & x) const;
@ -14,6 +14,18 @@ void FieldVisitorHash::operator() (const Null &) const
     hash.update(type);
 }
 
+void FieldVisitorHash::operator() (const NegativeInfinity &) const
+{
+    UInt8 type = Field::Types::NegativeInfinity;
+    hash.update(type);
+}
+
+void FieldVisitorHash::operator() (const PositiveInfinity &) const
+{
+    UInt8 type = Field::Types::PositiveInfinity;
+    hash.update(type);
+}
+
 void FieldVisitorHash::operator() (const UInt64 & x) const
 {
     UInt8 type = Field::Types::UInt64;
@ -16,6 +16,8 @@ public:
     FieldVisitorHash(SipHash & hash_);
 
     void operator() (const Null & x) const;
+    void operator() (const NegativeInfinity & x) const;
+    void operator() (const PositiveInfinity & x) const;
     void operator() (const UInt64 & x) const;
     void operator() (const UInt128 & x) const;
     void operator() (const UInt256 & x) const;
@ -22,6 +22,8 @@ bool FieldVisitorSum::operator() (UInt64 & x) const
 bool FieldVisitorSum::operator() (Float64 & x) const { x += get<Float64>(rhs); return x != 0; }
 
 bool FieldVisitorSum::operator() (Null &) const { throw Exception("Cannot sum Nulls", ErrorCodes::LOGICAL_ERROR); }
+bool FieldVisitorSum::operator() (NegativeInfinity &) const { throw Exception("Cannot sum -Inf", ErrorCodes::LOGICAL_ERROR); }
+bool FieldVisitorSum::operator() (PositiveInfinity &) const { throw Exception("Cannot sum +Inf", ErrorCodes::LOGICAL_ERROR); }
 bool FieldVisitorSum::operator() (String &) const { throw Exception("Cannot sum Strings", ErrorCodes::LOGICAL_ERROR); }
 bool FieldVisitorSum::operator() (Array &) const { throw Exception("Cannot sum Arrays", ErrorCodes::LOGICAL_ERROR); }
 bool FieldVisitorSum::operator() (Tuple &) const { throw Exception("Cannot sum Tuples", ErrorCodes::LOGICAL_ERROR); }
@ -21,6 +21,8 @@ public:
     bool operator() (UInt64 & x) const;
     bool operator() (Float64 & x) const;
     bool operator() (Null &) const;
+    bool operator() (NegativeInfinity & x) const;
+    bool operator() (PositiveInfinity & x) const;
     bool operator() (String &) const;
     bool operator() (Array &) const;
     bool operator() (Tuple &) const;
@ -53,6 +53,8 @@ static String formatFloat(const Float64 x)
 
 
 String FieldVisitorToString::operator() (const Null &) const { return "NULL"; }
+String FieldVisitorToString::operator() (const NegativeInfinity &) const { return "-Inf"; }
+String FieldVisitorToString::operator() (const PositiveInfinity &) const { return "+Inf"; }
 String FieldVisitorToString::operator() (const UInt64 & x) const { return formatQuoted(x); }
 String FieldVisitorToString::operator() (const Int64 & x) const { return formatQuoted(x); }
 String FieldVisitorToString::operator() (const Float64 & x) const { return formatFloat(x); }
@ -10,6 +10,8 @@ class FieldVisitorToString : public StaticVisitor<String>
 {
 public:
     String operator() (const Null & x) const;
+    String operator() (const NegativeInfinity & x) const;
+    String operator() (const PositiveInfinity & x) const;
     String operator() (const UInt64 & x) const;
     String operator() (const UInt128 & x) const;
     String operator() (const UInt256 & x) const;
@ -7,6 +7,8 @@ namespace DB
 {
 
 void FieldVisitorWriteBinary::operator() (const Null &, WriteBuffer &) const { }
+void FieldVisitorWriteBinary::operator() (const NegativeInfinity &, WriteBuffer &) const { }
+void FieldVisitorWriteBinary::operator() (const PositiveInfinity &, WriteBuffer &) const { }
 void FieldVisitorWriteBinary::operator() (const UInt64 & x, WriteBuffer & buf) const { writeVarUInt(x, buf); }
 void FieldVisitorWriteBinary::operator() (const Int64 & x, WriteBuffer & buf) const { writeVarInt(x, buf); }
 void FieldVisitorWriteBinary::operator() (const Float64 & x, WriteBuffer & buf) const { writeFloatBinary(x, buf); }
@ -9,6 +9,8 @@ class FieldVisitorWriteBinary
 {
 public:
     void operator() (const Null & x, WriteBuffer & buf) const;
+    void operator() (const NegativeInfinity & x, WriteBuffer & buf) const;
+    void operator() (const PositiveInfinity & x, WriteBuffer & buf) const;
     void operator() (const UInt64 & x, WriteBuffer & buf) const;
     void operator() (const UInt128 & x, WriteBuffer & buf) const;
     void operator() (const UInt256 & x, WriteBuffer & buf) const;
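Taken together, these hunks extend every Field visitor with the two new infinity variants, so any code that dispatches over a Field handles -Inf and +Inf explicitly. A hedged sketch of how a caller observes them, assuming the usual applyVisitor helper from Core/Field.h:

    #include <Core/Field.h>
    #include <Common/FieldVisitorToString.h>

    using namespace DB;

    void infinityVisitorSketch()
    {
        Field inf = PositiveInfinity{};
        String rendered = applyVisitor(FieldVisitorToString(), inf); /// "+Inf"
    }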
Some files were not shown because too many files have changed in this diff.