Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-21 23:21:59 +00:00

Commit e0bdbe73d2: Merge remote-tracking branch 'origin/master' into HEAD
@@ -13,8 +13,3 @@ ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real time.
 * [Yandex.Messenger channel](https://yandex.ru/chat/#/join/20e380d9-c7be-4123-ab06-e95fb946975e) shares announcements and useful links in Russian.
 * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
 * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
-
-## Upcoming Events
-
-* [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date.
-* [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date.
@@ -75,7 +75,7 @@ std::string determineDefaultTimeZone()

     try
     {
-        tz_database_path = fs::canonical(tz_database_path);
+        tz_database_path = fs::weakly_canonical(tz_database_path);

         /// The tzdata file exists. If it is inside the tz_database_dir,
         /// then the relative path is the time zone id.
@@ -91,7 +91,7 @@ std::string determineDefaultTimeZone()
         if (!tz_file_path.is_absolute())
             tz_file_path = tz_database_path / tz_file_path;

-        tz_file_path = fs::canonical(tz_file_path);
+        tz_file_path = fs::weakly_canonical(tz_file_path);

         fs::path relative_path = tz_file_path.lexically_relative(tz_database_path);
         if (!relative_path.empty() && *relative_path.begin() != ".." && *relative_path.begin() != ".")
@@ -38,6 +38,7 @@
 #include <common/argsToConfig.h>
 #include <common/getThreadId.h>
 #include <common/coverage.h>
+#include <common/sleep.h>

 #include <IO/WriteBufferFromFile.h>
 #include <IO/WriteBufferFromFileDescriptorDiscardOnFailure.h>
@@ -50,6 +51,7 @@
 #include <Common/getMultipleKeysFromConfig.h>
 #include <Common/ClickHouseRevision.h>
 #include <Common/Config/ConfigProcessor.h>
+#include <Common/SymbolIndex.h>

 #if !defined(ARCADIA_BUILD)
 #    include <Common/config_version.h>
@@ -83,7 +85,8 @@ static const size_t signal_pipe_buf_size =
     + sizeof(ucontext_t)
     + sizeof(StackTrace)
     + sizeof(UInt32)
-    + max_query_id_size + 1; /// query_id + varint encoded length
+    + max_query_id_size + 1 /// query_id + varint encoded length
+    + sizeof(void*);


 using signal_function = void(int, siginfo_t*, void*);
@@ -133,13 +136,14 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
     DB::writePODBinary(stack_trace, out);
     DB::writeBinary(UInt32(getThreadId()), out);
     DB::writeStringBinary(query_id, out);
+    DB::writePODBinary(DB::current_thread, out);

     out.next();

     if (sig != SIGTSTP) /// This signal is used for debugging.
     {
         /// The time that is usually enough for separate thread to print info into log.
-        ::sleep(10);
+        sleepForSeconds(10);
         call_default_signal_handler(sig);
     }

@@ -216,16 +220,18 @@ public:
             StackTrace stack_trace(NoCapture{});
             UInt32 thread_num;
             std::string query_id;
+            DB::ThreadStatus * thread_ptr{};

             DB::readPODBinary(info, in);
             DB::readPODBinary(context, in);
             DB::readPODBinary(stack_trace, in);
             DB::readBinary(thread_num, in);
             DB::readBinary(query_id, in);
+            DB::readPODBinary(thread_ptr, in);

             /// This allows to receive more signals if failure happens inside onFault function.
             /// Example: segfault while symbolizing stack trace.
-            std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, query_id); }).detach();
+            std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, query_id, thread_ptr); }).detach();
         }
     }
 }
@@ -236,7 +242,8 @@ private:

     void onTerminate(const std::string & message, UInt32 thread_num) const
     {
-        LOG_FATAL(log, "(version {}{}) (from thread {}) {}", VERSION_STRING, VERSION_OFFICIAL, thread_num, message);
+        LOG_FATAL(log, "(version {}{}, {}) (from thread {}) {}",
+            VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message);
     }

     void onFault(
@@ -245,21 +252,30 @@ private:
         const ucontext_t & context,
         const StackTrace & stack_trace,
         UInt32 thread_num,
-        const std::string & query_id) const
+        const std::string & query_id,
+        DB::ThreadStatus * thread_ptr) const
     {
         DB::ThreadStatus thread_status;

+        /// Send logs from this thread to client if possible.
+        /// It will allow client to see failure messages directly.
+        if (thread_ptr)
+        {
+            if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())
+                DB::CurrentThread::attachInternalTextLogsQueue(logs_queue, DB::LogsLevel::trace);
+        }
+
         LOG_FATAL(log, "########################################");

-        std::stringstream message;
-        message << "(version " << VERSION_STRING << VERSION_OFFICIAL << ")";
-        message << " (from thread " << thread_num << ")";
-        if (query_id.empty())
-            message << " (no query)";
-        else
-            message << " (query_id: " << query_id << ")";
-        message << " Received signal " << strsignal(sig) << " (" << sig << ").";
-
-        LOG_FATAL(log, message.str());
+        if (query_id.empty())
+        {
+            LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (no query) Received signal {} ({})",
+                VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, strsignal(sig), sig);
+        }
+        else
+        {
+            LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) Received signal {} ({})",
+                VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, query_id, strsignal(sig), sig);
+        }

         LOG_FATAL(log, signalToErrorMessage(sig, info, context));
@@ -279,6 +295,10 @@ private:

         /// Write symbolized stack trace line by line for better grep-ability.
         stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
+
+        /// When everything is done, we will try to send these error messages to client.
+        if (thread_ptr)
+            thread_ptr->onFatalError();
     }
 };

@@ -292,17 +312,15 @@ static void sanitizerDeathCallback()

     StringRef query_id = DB::CurrentThread::getQueryId(); /// This is signal safe.

-    std::stringstream message;
-    message << "(version " << VERSION_STRING << VERSION_OFFICIAL << ")";
-    message << " (from thread " << getThreadId() << ")";
-    if (query_id.size == 0)
-        message << " (no query)";
-    else
-        message << " (query_id: " << query_id << ")";
-    message << " Sanitizer trap.";
-
-    LOG_FATAL(log, message.str());
+    if (query_id.size == 0)
+    {
+        LOG_FATAL(log, "(version {}{}) (from thread {}) (no query) Sanitizer trap.",
+            VERSION_STRING, VERSION_OFFICIAL, getThreadId());
+    }
+    else
+    {
+        LOG_FATAL(log, "(version {}{}) (from thread {}) (query_id: {}) Sanitizer trap.",
+            VERSION_STRING, VERSION_OFFICIAL, getThreadId(), query_id);
+    }

     /// Just in case print our own stack trace. In case when llvm-symbolizer does not work.
@@ -711,12 +729,23 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()

     signal_listener = std::make_unique<SignalListener>(*this);
     signal_listener_thread.start(*signal_listener);
+
+#if defined(__ELF__) && !defined(__FreeBSD__)
+    String build_id_hex = DB::SymbolIndex::instance().getBuildIDHex();
+    if (build_id_hex.empty())
+        build_id_info = "no build id";
+    else
+        build_id_info = "build id: " + build_id_hex;
+#else
+    build_id_info = "no build id";
+#endif
 }

 void BaseDaemon::logRevision() const
 {
     Poco::Logger::root().information("Starting " + std::string{VERSION_FULL}
         + " with revision " + std::to_string(ClickHouseRevision::get())
+        + ", " + build_id_info
         + ", PID " + std::to_string(getpid()));
 }

@@ -198,6 +198,8 @@ protected:
     std::string config_path;
     DB::ConfigProcessor::LoadedConfig loaded_config;
     Poco::Util::AbstractConfiguration * last_configuration = nullptr;
+
+    String build_id_info;
 };

@@ -1,9 +1,9 @@
 # This strings autochanged from release_lib.sh:
-SET(VERSION_REVISION 54435)
+SET(VERSION_REVISION 54436)
 SET(VERSION_MAJOR 20)
-SET(VERSION_MINOR 5)
+SET(VERSION_MINOR 6)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f)
-SET(VERSION_DESCRIBE v20.5.1.1-prestable)
-SET(VERSION_STRING 20.5.1.1)
+SET(VERSION_GITHASH efc57fb063b3fb4df968d916720ec4d4ced4642e)
+SET(VERSION_DESCRIBE v20.6.1.1-prestable)
+SET(VERSION_STRING 20.6.1.1)
 # end of autochange
@@ -18,7 +18,7 @@ message(STATUS "Default libraries: ${DEFAULT_LIBS}")
 set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
 set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})

-# glibc-compatibility library relies to fixed version of libc headers
+# glibc-compatibility library relies to constant version of libc headers
 # (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
 # This is for x86_64. For other architectures we have separate toolchains.
 if (ARCH_AMD64 AND NOT_UNBUNDLED)
@@ -219,7 +219,9 @@ if (ENABLE_HYPERSCAN)

     target_compile_definitions (hyperscan PUBLIC USE_HYPERSCAN=1)
     target_compile_options (hyperscan
-        PRIVATE -g0 -march=corei7 # library has too much debug information
+        PRIVATE -g0 # Library has too much debug information
+                -march=corei7 -O2 -fno-strict-aliasing -fno-omit-frame-pointer -fvisibility=hidden # The options from original build system
+                -fno-sanitize=undefined # Assume the library takes care of itself
     )
     target_include_directories (hyperscan
         PRIVATE
debian/changelog (vendored)
@@ -1,5 +1,5 @@
-clickhouse (20.5.1.1) unstable; urgency=low
+clickhouse (20.6.1.1) unstable; urgency=low

   * Modified source code

- -- clickhouse-release <clickhouse-release@yandex-team.ru>  Tue, 28 Apr 2020 20:12:13 +0300
+ -- clickhouse-release <clickhouse-release@yandex-team.ru>  Mon, 22 Jun 2020 20:40:23 +0300
debian/control (vendored)
@@ -28,7 +28,7 @@ Description: Client binary for ClickHouse

 Package: clickhouse-common-static
 Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, tzdata
+Depends: ${shlibs:Depends}, ${misc:Depends}
 Suggests: clickhouse-common-static-dbg
 Replaces: clickhouse-common, clickhouse-server-base
 Provides: clickhouse-common, clickhouse-server-base
@@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.5.1.*
+ARG version=20.6.1.*

 RUN apt-get update \
     && apt-get install --yes --no-install-recommends \
@@ -17,7 +17,6 @@ RUN apt-get update \
         clickhouse-client=$version \
         clickhouse-common-static=$version \
         locales \
-        tzdata \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf \
     && apt-get clean

@@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.5.1.*
+ARG version=20.6.1.*
 ARG gosu_ver=1.10

 RUN apt-get update \
@@ -21,7 +21,6 @@ RUN apt-get update \
         locales \
         ca-certificates \
         wget \
-        tzdata \
     && rm -rf \
         /var/lib/apt/lists/* \
         /var/cache/debconf \
@@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.5.1.*
+ARG version=20.6.1.*

 RUN apt-get update && \
     apt-get install -y apt-transport-https dirmngr && \
@@ -17,8 +17,7 @@ RUN apt-get update \
         odbc-postgresql \
         sqlite3 \
         curl \
-        tar \
-        tzdata
+        tar
 RUN rm -rf \
     /var/lib/apt/lists/* \
     /var/cache/debconf \
@@ -7,3 +7,7 @@ services:
             POSTGRES_PASSWORD: mysecretpassword
         ports:
             - 5432:5432
+        networks:
+            default:
+                aliases:
+                    - postgre-sql.local
@@ -5,4 +5,11 @@ toc_priority: 25
 toc_title: hidden
 ---

+# ClickHouse Engines
+
+There are two key engine kinds in ClickHouse:
+
+- [Table engines](table-engines/index.md)
+- [Database engines](database-engines/index.md)
+
 {## [Original article](https://clickhouse.tech/docs/en/engines/) ##}
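To make the two kinds concrete, here is a minimal editorial sketch (not part of the patch; the database, table, and column names are hypothetical, while `Lazy` and `TinyLog` are real engines):

``` sql
-- Database engine: governs how the whole database behaves.
-- Lazy keeps tables in RAM only for a while after the last access.
CREATE DATABASE db_lazy ENGINE = Lazy(3600);

-- Table engine: governs storage and access for a single table.
CREATE TABLE db_lazy.events (x UInt8) ENGINE = TinyLog;
```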
@@ -19,7 +19,7 @@ The table engine (type of table) determines:

 ### MergeTree {#mergetree}

-The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) versions of engines), partitioning, and other features not supported in other engines.
+The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) versions of engines), partitioning, secondary data-skipping indexes, and other features not supported in other engines.

 Engines in the family:

@@ -80,4 +80,4 @@ To select data from a virtual column, you must specify its name in the `SELECT` query.

 If you create a table with a column that has the same name as one of the table virtual columns, the virtual column becomes inaccessible. We don’t recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore.

-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/engines/table-engines/) <!--hide-->
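As an editorial illustration of the underscore convention (a MergeTree table named `hits` is a hypothetical assumption; `_part` is a real MergeTree virtual column holding the data part name):

``` sql
-- Rows per data part, read from the virtual column _part.
SELECT _part, count() FROM hits GROUP BY _part;
```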
@@ -5,7 +5,7 @@ toc_title: ReplacingMergeTree

 # ReplacingMergeTree {#replacingmergetree}

-The engine differs from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same primary key value (or more accurately, with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md) value).
+The engine differs from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md) value.

 Data deduplication occurs only during a merge. Merging occurs in the background at an unknown time, so you can’t plan for it. Some of the data may remain unprocessed. Although you can run an unscheduled merge using the `OPTIMIZE` query, don’t count on using it, because the `OPTIMIZE` query will read and write a large amount of data.

@@ -33,7 +33,7 @@ For a description of request parameters, see [request description](../../../sql-

 - `ver` — column with version. Type `UInt*`, `Date` or `DateTime`. Optional parameter.

-    When merging, `ReplacingMergeTree` from all the rows with the same primary key leaves only one:
+    When merging, `ReplacingMergeTree` from all the rows with the same sorting key leaves only one:

     - Last in the selection, if `ver` not set.
     - With the maximum version, if `ver` specified.
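A minimal editorial sketch of how `ver` plays with the sorting key (table and column names are hypothetical, not from the patch):

``` sql
CREATE TABLE page_visits
(
    UserID UInt64,
    URL String,
    Version UInt32
)
ENGINE = ReplacingMergeTree(Version)
ORDER BY (UserID, URL);

-- After a background merge, rows sharing the sorting key (UserID, URL)
-- collapse to the single row with the highest Version.
OPTIMIZE TABLE page_visits FINAL;
SELECT * FROM page_visits;
```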
@@ -1,58 +0,0 @@
----
-toc_priority: 78
-toc_title: General Questions
----
-
-# General Questions {#general-questions}
-
-## Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce}
-
-We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT.
-
-These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks.
-
-Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala or Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface.
-
-## What If I Have a Problem with Encodings When Using Oracle Through ODBC? {#oracle-odbc-encodings}
-
-If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
-
-**Example**
-
-``` sql
-NLS_LANG=RUSSIAN_RUSSIA.UTF8
-```
-
-## How Do I Export Data from ClickHouse to a File? {#how-to-export-to-file}
-
-### Using INTO OUTFILE Clause {#using-into-outfile-clause}
-
-Add an [INTO OUTFILE](../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.
-
-For example:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file'
-```
-
-By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../interfaces/formats.md), use the [FORMAT clause](../sql-reference/statements/select/format.md#format-clause).
-
-For example:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
-```
-
-### Using a File-Engine Table {#using-a-file-engine-table}
-
-See [File](../engines/table-engines/special/file.md).
-
-### Using Command-Line Redirection {#using-command-line-redirection}
-
-``` sql
-$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
-```
-
-See [clickhouse-client](../interfaces/cli.md).
-
-{## [Original article](https://clickhouse.tech/docs/en/faq/general/) ##}
docs/en/faq/general/dbms-naming.md (new file)
@@ -0,0 +1,11 @@
+---
+toc_hidden: true
+toc_priority: 10
+---
+
+# What Does “ClickHouse” Mean? {#what-does-clickhouse-mean}
+
+It’s a combination of “**Click**stream” and “Data ware**house**”. It comes from the original use case at Yandex.Metrica, where ClickHouse was supposed to keep records of all clicks by people from all over the Internet and it still does the job. You can read more about this use case on [ClickHouse history](../../introduction/history.md) page.
+
+!!! info "Fun fact"
+    Many years after ClickHouse got its name, this approach of combining two words that are meaningful on their own has been highlighted as the best way to name a database in a [research by Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), an Associate Professor of Databases at Carnegie Mellon University. ClickHouse shared his “best database name of all time” award with Postgres.
docs/en/faq/general/index.md (new file)
@@ -0,0 +1,18 @@
+---
+toc_hidden_folder: true
+toc_priority: 1
+toc_title: General
+---
+
+# General Questions About ClickHouse {#general-questions}
+
+Questions:
+
+- [What does “ClickHouse” mean?](../../faq/general/dbms-naming.md)
+- [What does “Не тормозит” mean?](../../faq/general/ne-tormozit.md)
+- [Why not use something like MapReduce?](../../faq/general/mapreduce.md)
+
+!!! info "Don’t see what you were looking for?"
+    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.
+
+{## [Original article](https://clickhouse.tech/docs/en/faq/general/) ##}
docs/en/faq/general/mapreduce.md (new file)
@@ -0,0 +1,12 @@
+---
+toc_hidden: true
+toc_priority: 20
+---
+
+# Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce}
+
+We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT.
+
+These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks.
+
+Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala or Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface.
docs/en/faq/general/ne-tormozit.md (new file)
@@ -0,0 +1,24 @@
+---
+toc_hidden: true
+toc_priority: 11
+---
+
+# What Does “Не тормозит” mean? {#what-does-ne-tormozit-mean}
+
+This question usually arises when people see official ClickHouse t-shirts. They have large words **“ClickHouse не тормозит”** on the front.
+
+Before ClickHouse became open-source, it has been developed as an in-house storage system by the largest Russian IT company, [Yandex](https://yandex.com/company/). That’s why it initially got its slogan in Russian, which is “не тормозит”. After the open-source release we first produced some of those t-shirts for events in Russia and it was a no-brainer to use the slogan as-is.
+
+One of the following batches of those t-shirts was supposed to be given away on events outside of Russia and we tried to make the English version of the slogan. Unfortunately, the Russian language is kind of elegant in terms of expressing stuff and there was a restriction of limited space on a t-shirt, so we failed to come up with good enough translation (most options appeared to be either long or inaccurate) and decided to keep the slogan in Russian even on t-shirts produced for international events. It appeared to be a great decision because people all over the world get positively surprised and curious when they see it.
+
+So, what does it mean? Here are some ways to translate *“не тормозит”*:
+
+- If you translate it literally, it’d be something like *“ClickHouse doesn’t press the brake pedal”*.
+- If you’d want to express it as close to how it sounds to a Russian person with IT background, it’d be something like *“If your larger system lags, it’s not because it uses ClickHouse”*.
+- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse doesn’t lag”* or just *“ClickHouse is fast”*.
+
+If you haven’t seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one:
+
+![iframe](https://www.youtube.com/embed/bSyQahMVZ7w)
+
+P.S. These t-shirts are not for sale, they are given away for free on most [ClickHouse Meetups](https://clickhouse.tech/#meet), usually for best questions or other forms of active participation.
@@ -2,7 +2,16 @@
 toc_folder_title: F.A.Q.
+toc_hidden: true
 toc_priority: 76
 toc_title: hidden
 ---

+# ClickHouse F.A.Q {#clickhouse-f-a-q}
+
+This section of the documentation is a place to collect answers to ClickHouse-related questions that arise often.
+
+Categories:
+
+- [General](../faq/general/index.md)
+- [Operations](../faq/operations/index.md)
+- [Integration](../faq/integration/index.md)
+
 {## [Original article](https://clickhouse.tech/docs/en/faq) ##}

docs/en/faq/integration/file-export.md (new file)
@@ -0,0 +1,36 @@
+---
+toc_hidden: true
+toc_priority: 10
+---
+
+# How Do I Export Data from ClickHouse to a File? {#how-to-export-to-file}
+
+## Using INTO OUTFILE Clause {#using-into-outfile-clause}
+
+Add an [INTO OUTFILE](../../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.
+
+For example:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file'
+```
+
+By default, ClickHouse uses the [TabSeparated](../../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../../interfaces/formats.md), use the [FORMAT clause](../../sql-reference/statements/select/format.md#format-clause).
+
+For example:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
+```
+
+## Using a File-Engine Table {#using-a-file-engine-table}
+
+See [File](../../engines/table-engines/special/file.md) table engine.
+
+## Using Command-Line Redirection {#using-command-line-redirection}
+
+``` bash
+$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
+```
+
+See [clickhouse-client](../../interfaces/cli.md).
docs/en/faq/integration/index.md (new file)
@@ -0,0 +1,17 @@
+---
+toc_hidden_folder: true
+toc_priority: 3
+toc_title: Integration
+---
+
+# Question About Integrating ClickHouse and Other Systems {#question-about-integrating-clickhouse-and-other-systems}
+
+Questions:
+
+- [How do I export data from ClickHouse to a file?](../../faq/integration/file-export.md)
+- [What if I Have a problem with encodings when connecting to Oracle via ODBC?](../../faq/integration/oracle-odbc.md)
+
+!!! info "Don’t see what you were looking for?"
+    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.
+
+{## [Original article](https://clickhouse.tech/docs/en/faq/integration/) ##}
docs/en/faq/integration/oracle-odbc.md (new file)
@@ -0,0 +1,14 @@
+---
+toc_hidden: true
+toc_priority: 20
+---
+
+# What If I Have a Problem with Encodings When Using Oracle Via ODBC? {#oracle-odbc-encodings}
+
+If you use Oracle as a source of ClickHouse external dictionaries via Oracle ODBC driver, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
+
+**Example**
+
+``` bash
+NLS_LANG=RUSSIAN_RUSSIA.UTF8
+```
docs/en/faq/operations/index.md (new file)
@@ -0,0 +1,16 @@
+---
+toc_hidden_folder: true
+toc_priority: 2
+toc_title: Operations
+---
+
+# Question About Operating ClickHouse Servers and Clusters {#question-about-operating-clickhouse-servers-and-clusters}
+
+Questions:
+
+- [Which ClickHouse version to use in production?](../../faq/operations/production.md)
+
+!!! info "Don’t see what you were looking for?"
+    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.
+
+{## [Original article](https://clickhouse.tech/docs/en/faq/production/) ##}
docs/en/faq/operations/production.md (new file)
@@ -0,0 +1,69 @@
+---
+toc_hidden: true
+toc_priority: 10
+---
+
+# Which ClickHouse Version to Use in Production? {#which-clickhouse-version-to-use-in-production}
+
+First of all, let’s discuss why people ask this question in the first place. There are two key reasons:
+
+1. ClickHouse is developed with pretty high velocity and usually, there are 10+ stable releases per year. It makes a wide range of releases to choose from, which is not so trivial choice.
+2. Some users want to avoid spending time figuring out which version works best for their use case and just follow someone else’s advice.
+
+The second reason is more fundamental, so we’ll start with it and then get back to navigating through various ClickHouse releases.
+
+## Which ClickHouse Version Do You Recommend? {#which-clickhouse-version-do-you-recommend}
+
+It’s tempting to hire consultants or trust some known experts to get rid of responsibility for your production environment. You install some specific ClickHouse version that someone else recommended, now if there’s some issue with it - it’s not your fault, it’s someone else’s. This line of reasoning is a big trap. No external person knows better what’s going on in your company’s production environment.
+
+So how to properly choose which ClickHouse version to upgrade to? Or how to choose your first ClickHouse version? First of all, you need to invest in setting up a **realistic pre-production environment**. In an ideal world, it could be a completely identical shadow copy, but that’s usually expensive.
+
+Here’re some key points to get reasonable fidelity in a pre-production environment with not so high costs:
+
+- Pre-production environment needs to run an as close set of queries as you intend to run in production:
+    - Don’t make it read-only with some frozen data.
+    - Don’t make it write-only with just copying data without building some typical reports.
+    - Don’t wipe it clean instead of applying schema migrations.
+- Use a sample of real production data and queries. Try to choose a sample that’s still representative and makes `SELECT` queries return reasonable results. Use obfuscation if your data is sensitive and internal policies don’t allow it to leave the production environment.
+- Make sure that pre-production is covered by your monitoring and alerting software the same way as your production environment does.
+- If your production spans across multiple datacenters or regions, make your pre-production does the same.
+- If your production uses complex features like replication, distributed table, cascading materialize views, make sure they are configured similarly in pre-production.
+- There’s a trade-off on using the roughly same number of servers or VMs in pre-production as in production, but of smaller size, or much less of them, but of the same size. The first option might catch extra network-related issues, while the latter is easier to manage.
+
+The second area to invest in is **automated testing infrastructure**. Don’t assume that if some kind of query has executed successfully once, it’ll continue to do so forever. It’s ok to have some unit tests where ClickHouse is mocked but make sure your product has a reasonable set of automated tests that are run against real ClickHouse and check that all important use cases are still working as expected.
+
+Extra step forward could be contributing those automated tests to [ClickHouse’s open-source test infrastructure](https://github.com/ClickHouse/ClickHouse/tree/master/tests) that’s continuously used in its day-to-day development. It definitely will take some additional time and effort to learn [how to run it](../../development/tests.md) and then how to adapt your tests to this framework, but it’ll pay off by ensuring that ClickHouse releases are already tested against them when they are announced stable, instead of repeatedly losing time on reporting the issue after the fact and then waiting for a bugfix to be implemented, backported and released. Some companies even have such test contributions to infrastructure by its use as an internal policy, most notably it’s called [Beyonce’s Rule](https://www.oreilly.com/library/view/software-engineering-at/9781492082781/ch01.html#policies_that_scale_well) at Google.
+
+When you have your pre-production environment and testing infrastructure in place, choosing the best version is straightforward:
+
+1. Routinely run your automated tests against new ClickHouse releases. You can do it even for ClickHouse releases that are marked as `testing`, but going forward to the next steps with them is not recommended.
+2. Deploy the ClickHouse release that passed the tests to pre-production and check that all processes are running as expected.
+3. Report any issues you discovered to [ClickHouse GitHub Issues](https://github.com/ClickHouse/ClickHouse/issues).
+4. If there were no major issues, it should be safe to start deploying ClickHouse release to your production environment. Investing in gradual release automation that implements an approach similar to [canary releases](https://martinfowler.com/bliki/CanaryRelease.html) or [green-blue deployments](https://martinfowler.com/bliki/BlueGreenDeployment.html) might further reduce the risk of issues in production.
+
+As you might have noticed, there’s nothing specific to ClickHouse in the approach described above, people do that for any piece of infrastructure they rely on if they take their production environment seriously.
+
+## How to Choose Between ClickHouse Releases? {#how-to-choose-between-clickhouse-releases}
+
+If you look into contents of ClickHouse package repository, you’ll see four kinds of packages:
+
+1. `testing`
+2. `prestable`
+3. `stable`
+4. `lts` (long-term support)
+
+As was mentioned earlier, `testing` is good mostly to notice issues early, running them in production is not recommended because each of them is not tested as thoroughly as other kinds of packages.
+
+`prestable` is a release candidate which generally looks promising and is likely to become announced as `stable` soon. You can try them out in pre-production and report issues if you see any.
+
+For production use, there are two key options: `stable` and `lts`. Here is some guidance on how to choose between them:
+
+- `stable` is the kind of package we recommend by default. They are released roughly monthly (and thus provide new features with reasonable delay) and three latest stable releases are supported in terms of diagnostics and backporting of bugfixes.
+- `lts` are released twice a year and are supported for a year after their initial release. You might prefer them over `stable` in the following cases:
+    - Your company has some internal policies that don’t allow for frequent upgrades or using non-LTS software.
+    - You are using ClickHouse in some secondary products that either doesn’t require any complex ClickHouse features and don’t have enough resources to keep it updated.
+
+Many teams who initially thought that `lts` is the way to go, often switch to `stable` anyway because of some recent feature that’s important for their product.
+
+!!! warning "Important"
+    One more thing to keep in mind when upgrading ClickHouse: we’re always keeping eye on compatibility across releases, but sometimes it’s not reasonable to keep and some minor details might change. So make sure you check the [changelog](../../whats-new/changelog/index.md) before upgrading to see if there are any notes about backward-incompatible changes.
@@ -98,5 +98,12 @@ toc_title: Integrations
 - Elixir
     - [Ecto](https://github.com/elixir-ecto/ecto)
     - [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
+- Ruby
+    - [Ruby on Rails](https://rubyonrails.org/)
+        - [activecube](https://github.com/bitquery/activecube)
+        - [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
+    - [GraphQL](https://github.com/graphql)
+        - [activecube-graphql](https://github.com/bitquery/activecube-graphql)
+

 [Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->
@@ -15,11 +15,15 @@ It’s also worth noting that ClickHouse is a database management system, not a single database.

 ## Data Compression {#data-compression}

-Some column-oriented DBMSs (InfiniDB CE and MonetDB) do not use data compression. However, data compression does play a key role in achieving excellent performance.
+Some column-oriented DBMSs do not use data compression. However, data compression does play a key role in achieving excellent performance.
+
+In addition to efficient general-purpose compression codecs with different trade-offs between disk space and CPU consumption, ClickHouse provides [specialized codecs](../sql-reference/statements/create.md#create-query-specialized-codecs) for specific kinds of data, which allow ClickHouse to compete with and outperform more niche databases, like time-series ones.

 ## Disk Storage of Data {#disk-storage-of-data}

-Keeping data physically sorted by primary key makes it possible to extract data for its specific values or value ranges with low latency, less than a few dozen milliseconds. Some column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. This approach encourages the allocation of a larger hardware budget than is necessary for real-time analysis. ClickHouse is designed to work on regular hard drives, which means the cost per GB of data storage is low, but SSD and additional RAM are also fully used if available.
+Keeping data physically sorted by primary key makes it possible to extract data for its specific values or value ranges with low latency, less than a few dozen milliseconds. Some column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. This approach encourages the allocation of a larger hardware budget than is necessary for real-time analysis.
+
+ClickHouse is designed to work on regular hard drives, which means the cost per GB of data storage is low, but SSD and additional RAM are also fully used if available.

 ## Parallel Processing on Multiple Cores {#parallel-processing-on-multiple-cores}

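The specialized codecs mentioned in the added paragraph are declared per column; a minimal editorial sketch (the table is hypothetical, while `DoubleDelta` and `Gorilla` are real ClickHouse codec names):

``` sql
CREATE TABLE sensor_readings
(
    -- DoubleDelta suits slowly growing sequences such as timestamps.
    ts DateTime CODEC(DoubleDelta, LZ4),
    -- Gorilla suits gauge-like floating-point series.
    value Float64 CODEC(Gorilla)
)
ENGINE = MergeTree()
ORDER BY ts;
```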
@@ -28,15 +32,18 @@ Large queries are parallelized naturally, taking all the necessary resources available on the current server.

 ## Distributed Processing on Multiple Servers {#distributed-processing-on-multiple-servers}

 Almost none of the columnar DBMSs mentioned above have support for distributed query processing.

 In ClickHouse, data can reside on different shards. Each shard can be a group of replicas used for fault tolerance. All shards are used to run a query in parallel, transparently for the user.

 ## SQL Support {#sql-support}

-ClickHouse supports a declarative query language based on SQL that is identical to the SQL standard in many cases.
-Supported queries include GROUP BY, ORDER BY, subqueries in FROM, IN, and JOIN clauses, and scalar subqueries.
-Dependent subqueries and window functions are not supported.
+ClickHouse supports a [declarative query language based on SQL](../sql-reference/index.md) that is identical to the ANSI SQL standard in [many cases](../sql-reference/ansi.md).
+
+Supported queries include [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in [FROM](../sql-reference/statements/select/from.md), [JOIN](../sql-reference/statements/select/join.md) clause, [IN](../sql-reference/operators/in.md) operator, and scalar subqueries.
+
+Correlated (dependent) subqueries and window functions are not supported at the time of writing but might become available in the future.

-## Vector Engine {#vector-engine}
+## Vector Computation Engine {#vector-engine}

 Data is not only stored by columns but is processed by vectors (parts of columns), which allows achieving high CPU efficiency.
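An editorial sketch of a supported query shape combining several of the constructs listed in the rewritten paragraph above (the tables are hypothetical):

``` sql
-- GROUP BY, ORDER BY, a subquery in FROM, and an IN operator together.
SELECT UserID, hits
FROM
(
    SELECT UserID, count() AS hits
    FROM page_views
    WHERE UserID IN (SELECT UserID FROM allowed_users)
    GROUP BY UserID
)
ORDER BY hits DESC
LIMIT 10;
```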
@@ -44,13 +51,19 @@ Data is not only stored by columns but is processed by vectors (parts of columns), which allows achieving high CPU efficiency.

 ClickHouse supports tables with a primary key. To quickly perform queries on the range of the primary key, the data is sorted incrementally using the merge tree. Due to this, data can continually be added to the table. No locks are taken when new data is ingested.

-## Index {#index}
+## Primary Index {#primary-index}

 Having a data physically sorted by primary key makes it possible to extract data for its specific values or value ranges with low latency, less than a few dozen milliseconds.

+## Secondary Indexes {#secondary-indexes}
+
+Unlike other database management systems, secondary indexes in ClickHouse does not point to specific rows or row ranges. Instead, they allow the database to know in advance that all rows in some data parts wouldn't match the query filtering conditions and do not read them at all, thus they are called [data skipping indexes](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
+
 ## Suitable for Online Queries {#suitable-for-online-queries}

-Low latency means that queries can be processed without delay and without trying to prepare an answer in advance, right at the same moment while the user interface page is loading. In other words, online.
+Most OLAP database management systems don't aim for online queries with sub-second latencies. In alternative systems, report building time of tens of seconds or even minutes is often considered acceptable. Sometimes it takes even more which forces to prepare reports offline (in advance or by responding with "come back later").
+
+In ClickHouse low latency means that queries can be processed without delay and without trying to prepare an answer in advance, right at the same moment while the user interface page is loading. In other words, online.

 ## Support for Approximated Calculations {#support-for-approximated-calculations}

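Declaring one of the data skipping indexes described in the hunk above can be sketched as follows (editorial illustration; the table is hypothetical, while `minmax` is one of the real index types):

``` sql
CREATE TABLE events
(
    EventDate Date,
    UserID UInt64,
    Duration UInt32,
    -- Stores min/max of Duration per block of 4 granules, letting
    -- queries like "WHERE Duration > 3600" skip whole data ranges.
    INDEX duration_idx Duration TYPE minmax GRANULARITY 4
)
ENGINE = MergeTree()
ORDER BY (EventDate, UserID);
```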
@@ -60,16 +73,24 @@ ClickHouse provides various ways to trade accuracy for performance:
 2. Running a query based on a part (sample) of data and getting an approximated result. In this case, proportionally less data is retrieved from the disk.
 3. Running an aggregation for a limited number of random keys, instead of for all keys. Under certain conditions for key distribution in the data, this provides a reasonably accurate result while using fewer resources.

+## Adaptive Join Algorithm
+
+ClickHouse adaptively chooses how to [JOIN](../sql-reference/statements/select/join.md) multiple tables, by preferring hash-join algorithm and falling back to the merge-join algorithm if there's more than one large table.
+
 ## Data Replication and Data Integrity Support {#data-replication-and-data-integrity-support}

 ClickHouse uses asynchronous multi-master replication. After being written to any available replica, all the remaining replicas retrieve their copy in the background. The system maintains identical data on different replicas. Recovery after most failures is performed automatically, or semi-automatically in complex cases.

 For more information, see the section [Data replication](../engines/table-engines/mergetree-family/replication.md).

+## Role-Based Access Control
+
+ClickHouse implements user account management using SQL queries and allows for [role-based access control configuration](../operations/access-rights.md) similar to what can be found in ANSI SQL standard and popular relational database management systems.
+
 ## Features that Can Be Considered Disadvantages {#clickhouse-features-that-can-be-considered-disadvantages}

 1. No full-fledged transactions.
-2. Lack of ability to modify or delete already inserted data with high rate and low latency. There are batch deletes and updates available to clean up or modify data, for example to comply with [GDPR](https://gdpr-info.eu).
+2. Lack of ability to modify or delete already inserted data with a high rate and low latency. There are batch deletes and updates available to clean up or modify data, for example, to comply with [GDPR](https://gdpr-info.eu).
 3. The sparse index makes ClickHouse not so efficient for point queries retrieving single rows by their keys.

-[Original article](https://clickhouse.tech/docs/en/introduction/distinctive_features/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/introduction/distinctive-features/) <!--hide-->
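Point 2 of the approximated-calculations list in the hunk above (sampling) can be sketched like this (editorial illustration; `hits` is a hypothetical table assumed to be created with a `SAMPLE BY` clause, while `_sample_factor` is the real virtual column used to scale results back up):

``` sql
-- Reads roughly 10% of the rows; summing _sample_factor
-- yields an approximation of the total row count.
SELECT sum(_sample_factor) AS approx_rows
FROM hits
SAMPLE 1 / 10;
```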
@@ -821,6 +821,10 @@ ClickHouse supports the following algorithms of choosing replicas:
 - [First or random](#load_balancing-first_or_random)
 - [Round robin](#load_balancing-round_robin)

+See also:
+
+- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)
+
 ### Random (by Default) {#load_balancing-random}

 ``` sql
@@ -1170,8 +1174,10 @@ Controls how fast errors in distributed tables are zeroed. If a replica is unavailable

 See also:

+- [load\_balancing](#load_balancing-round_robin)
 - [Table engine Distributed](../../engines/table-engines/special/distributed.md)
 - [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)
+- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)

 ## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap}

@@ -1182,8 +1188,24 @@ Error count of each replica is capped at this value, preventing a single replica from accumulating too many errors.

 See also:

+- [load\_balancing](#load_balancing-round_robin)
 - [Table engine Distributed](../../engines/table-engines/special/distributed.md)
 - [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)
+- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)
+
+## distributed\_replica\_max\_ignored\_errors {#settings-distributed_replica_max_ignored_errors}
+
+- Type: unsigned int
+- Default value: 0
+
+Number of errors that will be ignored while choosing replicas (according to `load_balancing` algorithm).
+
+See also:
+
+- [load\_balancing](#load_balancing-round_robin)
+- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
+- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)
+- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)

 ## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms}

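The interplay between `load_balancing` and the new `distributed_replica_max_ignored_errors` setting can be sketched as follows (editorial illustration; `distributed_hits` is a hypothetical table using the Distributed engine):

``` sql
SET load_balancing = 'round_robin';
SET distributed_replica_max_ignored_errors = 2;

-- Replicas that have accumulated up to 2 errors are still
-- considered when choosing where to send this query.
SELECT count() FROM distributed_hits;
```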
@@ -1509,4 +1531,34 @@ Possible values:

 Default value: 0.

+## min_insert_block_size_rows_for_materialized_views {#min-insert-block-size-rows-for-materialized-views}
+
+Sets minimum number of rows in block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create.md#create-view). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.
+
+Possible values:
+
+- Any positive integer.
+- 0 — Squashing disabled.
+
+Default value: 1048576.
+
+**See Also**
+
+- [min_insert_block_size_rows](#min-insert-block-size-rows)
+
+## min_insert_block_size_bytes_for_materialized_views {#min-insert-block-size-bytes-for-materialized-views}
+
+Sets minimum number of bytes in block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create.md#create-view). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.
+
+Possible values:
+
+- Any positive integer.
+- 0 — Squashing disabled.
+
+Default value: 268435456.
+
+**See also**
+
+- [min_insert_block_size_bytes](#min-insert-block-size-bytes)
+
 [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
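A sketch of applying the two new settings (editorial illustration; `raw_events` and `staging_events` are hypothetical tables, and at least one materialized view over `raw_events` is assumed):

``` sql
SET min_insert_block_size_rows_for_materialized_views = 65536;
SET min_insert_block_size_bytes_for_materialized_views = 67108864;

-- Blocks pushed to the materialized views on raw_events are squashed
-- up to at least 65536 rows or 64 MiB, capping per-push memory usage.
INSERT INTO raw_events SELECT * FROM staging_events;
```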
@@ -9,6 +9,7 @@ The following aggregate functions are supported:
 - [`min`](../../sql-reference/aggregate-functions/reference/min.md#agg_function-min)
 - [`max`](../../sql-reference/aggregate-functions/reference/max.md#agg_function-max)
 - [`sum`](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum)
+- [`sumWithOverflow`](../../sql-reference/aggregate-functions/reference/sumwithoverflow.md#sumwithoverflowx)
 - [`groupBitAnd`](../../sql-reference/aggregate-functions/reference/groupbitand.md#groupbitand)
 - [`groupBitOr`](../../sql-reference/aggregate-functions/reference/groupbitor.md#groupbitor)
 - [`groupBitXor`](../../sql-reference/aggregate-functions/reference/groupbitxor.md#groupbitxor)
@@ -206,7 +206,7 @@ Setting fields:

 ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database.

-If you have a problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/general.md#oracle-odbc-encodings) article.
+If you have a problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item.

 ### Known Vulnerability of the ODBC Dictionary Functionality {#known-vulnerability-of-the-odbc-dictionary-functionality}

@@ -44,7 +44,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 ```

 Creates a table named ‘name’ in the ‘db’ database or the current database if ‘db’ is not set, with the structure specified in brackets and the ‘engine’ engine.
-The structure of the table is a list of column descriptions. If indexes are supported by the engine, they are indicated as parameters for the table engine.
+The structure of the table is a list of column descriptions, secondary indexes and constraints . If primary key is supported by the engine, it will be indicated as parameter for the table engine.

 A column description is `name type` in the simplest case. Example: `RegionID UInt32`.
 Expressions can also be defined for default values (see below).
@@ -73,7 +73,7 @@ There can be other clauses after the `ENGINE` clause in the query. See detailed

 ### Default Values {#create-default-values}

-The column description can specify an expression for a default value, in one of the following ways:`DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`.
+The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`.
 Example: `URLDomain String DEFAULT domain(URL)`.

 If an expression for the default value is not defined, the default values will be set to zeros for numbers, empty strings for strings, empty arrays for arrays, and `0000-00-00` for dates or `0000-00-00 00:00:00` for dates with time. NULLs are not supported.
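The three default-value forms can be sketched in one definition (editorial illustration; the table is hypothetical, while `domain`, `now`, and `substring` are real functions):

``` sql
CREATE TABLE visits
(
    URL String,
    URLDomain String DEFAULT domain(URL),       -- stored; computed when omitted in INSERT
    Created DateTime MATERIALIZED now(),        -- always computed; hidden from SELECT *
    ShortURL String ALIAS substring(URL, 1, 16) -- not stored; computed on read
)
ENGINE = MergeTree()
ORDER BY URL;
```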
@@ -98,5 +98,11 @@ toc_title: Integrations
 - Elixir
     - [Ecto](https://github.com/elixir-ecto/ecto)
     - [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
+- Ruby
+    - [Ruby on Rails](https://rubyonrails.org/)
+        - [activecube](https://github.com/bitquery/activecube)
+        - [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
+    - [GraphQL](https://github.com/graphql)
+        - [activecube-graphql](https://github.com/bitquery/activecube-graphql)

 [Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->
@@ -100,5 +100,11 @@ toc_title: "\u06CC\u06A9\u067E\u0627\u0631\u0686\u06AF\u06CC"
 - Elixir
     - [Ecto](https://github.com/elixir-ecto/ecto)
     - [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
+- Ruby
+    - [Ruby on Rails](https://rubyonrails.org/)
+        - [activecube](https://github.com/bitquery/activecube)
+        - [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
+    - [GraphQL](https://github.com/graphql)
+        - [activecube-graphql](https://github.com/bitquery/activecube-graphql)

 [Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->
@@ -100,5 +100,11 @@ toc_title: "Int\xE9gration"
 - Elixir
     - [Ecto](https://github.com/elixir-ecto/ecto)
     - [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
+- Ruby
+    - [Ruby on Rails](https://rubyonrails.org/)
+        - [activecube](https://github.com/bitquery/activecube)
+        - [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
+    - [GraphQL](https://github.com/graphql)
+        - [activecube-graphql](https://github.com/bitquery/activecube-graphql)

 [Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->
@ -100,5 +100,11 @@ toc_title: "\u7D71\u5408"
|
||||
- Elixir
|
||||
- [Ecto](https://github.com/elixir-ecto/ecto)
|
||||
- [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
|
||||
- Ruby
|
||||
- [Ruby on Rails](https://rubyonrails.org/)
|
||||
- [activecube](https://github.com/bitquery/activecube)
|
||||
- [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
|
||||
- [GraphQL](https://github.com/graphql)
|
||||
- [activecube-graphql](https://github.com/bitquery/activecube-graphql)
|
||||
|
||||
[元の記事](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->
|
||||
|
@ -276,7 +276,7 @@ $ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&
|
||||
### Пример {#primer}
|
||||
|
||||
``` bash
|
||||
$ curl -sS "<address>?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
|
||||
$ curl -sS "http://localhost:8123/?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
|
||||
```
|
||||
|
||||
## Предопределенный HTTP интерфейс {#predefined_http_interface}
|
||||
|
@ -93,5 +93,11 @@
|
||||
- Elixir
|
||||
- [Ecto](https://github.com/elixir-ecto/ecto)
|
||||
- [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
|
||||
|
||||
- Ruby
|
||||
- [Ruby on Rails](https://rubyonrails.org/)
|
||||
- [activecube](https://github.com/bitquery/activecube)
|
||||
- [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
|
||||
- [GraphQL](https://github.com/graphql)
|
||||
- [activecube-graphql](https://github.com/bitquery/activecube-graphql)
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/integrations/) <!--hide-->
|
||||
|
@ -1289,4 +1289,34 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;
|
||||
|
||||
Значение по умолчанию: 16.
|
||||
|
||||
## min_insert_block_size_rows_for_materialized_views {#min-insert-block-size-rows-for-materialized-views}
|
||||
|
||||
Устанавливает минимальное количество строк в блоке, который может быть вставлен в таблицу запросом `INSERT`. Блоки меньшего размера склеиваются в блоки большего размера. Настройка применяется только для блоков, вставляемых в [материализованное представление](../../sql-reference/statements/create.md#create-view). Настройка позволяет избежать избыточного потребления памяти.
|
||||
|
||||
Допустимые значения:
|
||||
|
||||
- Положительное целое число.
|
||||
- 0 — Склейка блоков выключена.
|
||||
|
||||
Значение по умолчанию: 1048576.
|
||||
|
||||
**См. также:**
|
||||
|
||||
- [min_insert_block_size_rows](#min-insert-block-size-rows)
|
||||
|
||||
## min_insert_block_size_bytes_for_materialized_views {#min-insert-block-size-bytes-for-materialized-views}
|
||||
|
||||
Устанавливает минимальное количество байтов в блоке, который может быть вставлен в таблицу запросом `INSERT`. Блоки меньшего размера склеиваются в блоки большего размера. Настройка применяется только для блоков, вставляемых в [материализованное представление](../../sql-reference/statements/create.md#create-view). Настройка позволяет избежать избыточного потребления памяти.
|
||||
|
||||
Допустимые значения:
|
||||
|
||||
- Положительное целое число.
|
||||
- 0 — Склейка блоков выключена.
|
||||
|
||||
Значение по умолчанию: 268435456.
|
||||
|
||||
**См. также:**
|
||||
|
||||
- [min_insert_block_size_bytes](#min-insert-block-size-bytes)
|
||||
|
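As a hedged sketch, both settings can also be applied per query via the `SETTINGS` clause (the table names below are illustrative, not from the document):

``` sql
INSERT INTO source_table
SELECT *
FROM input_stream
SETTINGS
    min_insert_block_size_rows_for_materialized_views = 1048576,
    min_insert_block_size_bytes_for_materialized_views = 268435456
```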
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->
|
||||
|
@ -85,6 +85,15 @@ def html_to_amp(content):
|
||||
tag.attrs['width'] = '640'
|
||||
if not tag.attrs.get('height'):
|
||||
tag.attrs['height'] = '320'
|
||||
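# Convert <iframe> to AMP's <amp-iframe>: set a responsive layout, drop attributes AMP rejects (alt, allowfullscreen), and default the dimensions.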
if tag.name == 'iframe':
|
||||
tag.name = 'amp-iframe'
|
||||
tag.attrs['layout'] = 'responsive'
|
||||
del tag.attrs['alt']
|
||||
del tag.attrs['allowfullscreen']
|
||||
if not tag.attrs.get('width'):
|
||||
tag.attrs['width'] = '640'
|
||||
if not tag.attrs.get('height'):
|
||||
tag.attrs['height'] = '320'
|
||||
elif tag.name == 'a':
|
||||
href = tag.attrs.get('href')
|
||||
if href:
|
||||
|
@ -1,5 +1,6 @@
|
||||
import collections
|
||||
import datetime
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
|
||||
@ -39,13 +40,17 @@ def build_nav_entry(root, args):
|
||||
title = meta.get('toc_folder_title', 'hidden')
|
||||
prio = meta.get('toc_priority', 9999)
|
||||
logging.debug(f'Nav entry: {prio}, {title}, {path}')
|
||||
if not content.strip():
|
||||
if meta.get('toc_hidden') or not content.strip():
|
||||
title = 'hidden'
|
||||
if title == 'hidden':
|
||||
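# Derive a unique, stable key from the page content so multiple hidden entries don't collide in the nav dict.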
title = 'hidden-' + hashlib.sha1(content.encode('utf-8')).hexdigest()
|
||||
if args.nav_limit and len(result_items) >= args.nav_limit:
|
||||
break
|
||||
result_items.append((prio, title, path))
|
||||
result_items = sorted(result_items, key=lambda x: (x[0], x[1]))
|
||||
result = collections.OrderedDict([(item[1], item[2]) for item in result_items])
|
||||
if index_meta.get('toc_hidden_folder'):
|
||||
current_title += '|hidden-folder'
|
||||
return index_meta.get('toc_priority', 10000), current_title, result
|
||||
|
||||
|
||||
|
@ -278,6 +278,10 @@ def minify_website(args):
|
||||
|
||||
def process_benchmark_results(args):
|
||||
benchmark_root = os.path.join(args.website_dir, 'benchmark')
|
||||
required_keys = {
|
||||
'dbms': ['result'],
|
||||
'hardware': ['result', 'system', 'system_full', 'kind']
|
||||
}
|
||||
for benchmark_kind in ['dbms', 'hardware']:
|
||||
results = []
|
||||
results_root = os.path.join(benchmark_root, benchmark_kind, 'results')
|
||||
@ -285,7 +289,11 @@ def process_benchmark_results(args):
|
||||
result_file = os.path.join(results_root, result)
|
||||
logging.debug(f'Reading benchmark result from {result_file}')
|
||||
with open(result_file, 'r') as f:
|
||||
results += json.loads(f.read())
|
||||
result = json.loads(f.read())
|
||||
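# Validate each parsed entry before merging: every benchmark kind declares its own set of required fields.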
for item in result:
|
||||
for required_key in required_keys[benchmark_kind]:
|
||||
assert required_key in item, f'No "{required_key}" in {result_file}'
|
||||
results += result
|
||||
results_js = os.path.join(args.output_dir, 'benchmark', benchmark_kind, 'results.js')
|
||||
with open(results_js, 'w') as f:
|
||||
data = json.dumps(results)
|
||||
|
@ -100,5 +100,11 @@ toc_title: Entegrasyonlar
|
||||
- Elixir
|
||||
- [Ecto](https://github.com/elixir-ecto/ecto)
|
||||
- [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
|
||||
- Ruby
|
||||
- [Ruby on Rails](https://rubyonrails.org/)
|
||||
- [activecube](https://github.com/bitquery/activecube)
|
||||
- [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
|
||||
- [GraphQL](https://github.com/graphql)
|
||||
- [activecube-graphql](https://github.com/bitquery/activecube-graphql)
|
||||
|
||||
[Orijinal makale](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->
|
||||
|
@ -1 +0,0 @@
|
||||
../../en/commercial/support.md
|
23
docs/zh/commercial/support.md
Normal file
@ -0,0 +1,23 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
toc_priority: 3
|
||||
toc_title: "Support"
|
||||
---
|
||||
|
||||
# ClickHouse商业支持服务提供商 {#clickhouse-commercial-support-service-providers}
|
||||
|
||||
!!! info "信息"
|
||||
如果您已经推出ClickHouse商业支持服务,请随时 [打开拉取请求](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) 将其添加到以下列表。
|
||||
|
||||
## Altinity {#altinity}
|
||||
|
||||
访问 [www.altinity.com](https://www.altinity.com/) 欲了解更多信息。
|
||||
|
||||
## Mafiree {#mafiree}
|
||||
|
||||
[服务说明](http://mafiree.com/clickhouse-analytics-services.php)
|
||||
|
||||
## MinervaDB {#minervadb}
|
||||
|
||||
[服务说明](https://minervadb.com/index.php/clickhouse-consulting-and-support-by-minervadb/)
|
@ -1,4 +1,4 @@
|
||||
# 更换麦树 {#replacingmergetree}
|
||||
# 替换合并树 {#replacingmergetree}
|
||||
|
||||
该引擎和[MergeTree](mergetree.md)的不同之处在于它会删除具有相同主键的重复项。
|
||||
|
||||
@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
|
||||
请求参数的描述,参考[请求参数](../../../engines/table-engines/mergetree-family/replacingmergetree.md)。
|
||||
|
||||
**替换树参数**
|
||||
**参数**
|
||||
|
||||
- `ver` — 版本列。类型为 `UInt*`, `Date` 或 `DateTime`。可选参数。
|
||||
|
||||
|
@ -1,28 +1,115 @@
|
||||
# 加入我们 {#join}
|
||||
---
|
||||
toc_priority: 40
|
||||
toc_title: 关联表引擎
|
||||
---
|
||||
|
||||
加载好的 JOIN 表数据会常驻内存中。
|
||||
# 关联表引擎 {#join}
|
||||
|
||||
Join(ANY|ALL, LEFT|INNER, k1[, k2, ...])
|
||||
使用 [JOIN](../../../sql-reference/statements/select/join.md#select-join)操作的一种可选的数据结构。
|
||||
|
||||
引擎参数:`ANY|ALL` – 连接修饰;`LEFT|INNER` – 连接类型。更多信息可参考 [JOIN子句](../../../engines/table-engines/special/join.md#select-join)。
|
||||
这些参数设置不用带引号,但必须与要 JOIN 表匹配。 k1,k2,……是 USING 子句中要用于连接的关键列。
|
||||
!!! note "注意"
|
||||
该文档和 [JOIN 语句](../../../sql-reference/statements/select/join.md#select-join) 无关.
|
||||
|
||||
此引擎表不能用于 GLOBAL JOIN 。
|
||||
## 建表语句 {#creating-a-table}
|
||||
|
||||
类似于 Set 引擎,可以使用 INSERT 向表中添加数据。设置为 ANY 时,重复键的数据会被忽略(仅一条用于连接)。设置为 ALL 时,重复键的数据都会用于连接。不能直接对 JOIN 表进行 SELECT。检索其数据的唯一方法是将其作为 JOIN 语句右边的表。
|
||||
``` sql
|
||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
(
|
||||
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
|
||||
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
|
||||
) ENGINE = Join(join_strictness, join_type, k1[, k2, ...])
|
||||
```
|
||||
|
||||
跟 Set 引擎类似,Join 引擎把数据存储在磁盘中。
|
||||
建表语句详情参见[创建表](../../../sql-reference/statements/create.md#create-table-query).
|
||||
|
||||
### 限制和设置 {#join-limitations-and-settings}
|
||||
**引擎参数**
|
||||
|
||||
创建表时,将应用以下设置:
|
||||
- `join_strictness` – [JOIN 限制](../../../sql-reference/statements/select/join.md#select-join-strictness).
|
||||
- `join_type` – [JOIN 类型](../../../sql-reference/statements/select/join.md#select-join-types).
|
||||
- `k1[, k2, ...]` – 进行`JOIN` 操作时 `USING`语句用到的key列
|
||||
|
||||
- join\_use\_nulls
|
||||
- max\_rows\_in\_join
|
||||
- max\_bytes\_in\_join
|
||||
- join\_overflow\_mode
|
||||
- join\_any\_take\_last\_row
|
||||
使用`join_strictness` 和 `join_type` 参数时不需要用引号, 例如, `Join(ANY, LEFT, col1)`. 这些参数必须和进行join操作的表相匹配。否则,CH不会报错,但是可能返回错误的数据。
|
||||
|
||||
该 `Join`-发动机表不能用于 `GLOBAL JOIN` 操作。
|
||||
## 表用法 {#table-usage}
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/join/) <!--hide-->
|
||||
### 示例 {#example}
|
||||
|
||||
创建左关联表:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE id_val(`id` UInt32, `val` UInt32) ENGINE = TinyLog
|
||||
```
|
||||
|
||||
``` sql
|
||||
INSERT INTO id_val VALUES (1,11)(2,12)(3,13)
|
||||
```
|
||||
|
||||
创建 `Join` 右边的表:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE id_val_join(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id)
|
||||
```
|
||||
|
||||
``` sql
|
||||
INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23)
|
||||
```
|
||||
|
||||
表关联:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM id_val ANY LEFT JOIN id_val_join USING (id) SETTINGS join_use_nulls = 1
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─id─┬─val─┬─id_val_join.val─┐
|
||||
│ 1 │ 11 │ 21 │
|
||||
│ 2 │ 12 │ ᴺᵁᴸᴸ │
|
||||
│ 3 │ 13 │ 23 │
|
||||
└────┴─────┴─────────────────┘
|
||||
```
|
||||
|
||||
作为一种替换方式,可以从 `Join`表获取数据,需要设置好join的key字段值。
|
||||
|
||||
``` sql
|
||||
SELECT joinGet('id_val_join', 'val', toUInt32(1))
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─joinGet('id_val_join', 'val', toUInt32(1))─┐
|
||||
│ 21 │
|
||||
└────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### 数据查询及插入 {#selecting-and-inserting-data}
|
||||
|
||||
可以使用 `INSERT`语句向 `Join`引擎表中添加数据。如果表是通过指定 `ANY`限制参数来创建的,那么重复key的数据会被忽略。指定 `ALL`限制参数时,所有行记录都会被添加进去。
|
||||
|
||||
不能通过 `SELECT` 语句直接从表中获取数据。请使用下面的方式:
|
||||
- 将表放在 `JOIN` 的右边进行查询
|
||||
- 调用 [joinGet](../../../sql-reference/functions/other-functions.md#joinget)函数,就像从字典中获取数据一样来查询表。
|
||||
|
||||
|
||||
### 使用限制及参数设置 {#join-limitations-and-settings}
|
||||
|
||||
创建表时,会应用下列设置参数:
|
||||
|
||||
- [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls)
|
||||
- [max\_rows\_in\_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join)
|
||||
- [max\_bytes\_in\_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join)
|
||||
- [join\_overflow\_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode)
|
||||
- [join\_any\_take\_last\_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
|
||||
|
||||
|
||||
`Join`表不能在 `GLOBAL JOIN`操作中使用
|
||||
|
||||
`Join`表创建及 [查询](../../../sql-reference/statements/select/index.md)时,允许使用[join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls)参数。如果使用不同的`join_use_nulls`设置,会导致表关联异常(取决于join的类型)。当使用函数 [joinGet](../../../sql-reference/functions/other-functions.md#joinget)时,请在建表和查询语句中使用相同的 `join_use_nulls` 参数设置。
|
||||
|
||||
|
||||
## 数据存储 {#data-storage}
|
||||
|
||||
`Join`表的数据总是保存在内存中。当往表中插入行记录时,CH会将数据块保存在硬盘目录中,这样服务器重启时数据可以恢复。
|
||||
|
||||
如果服务器非正常重启,保存在硬盘上的数据块会丢失或被损坏。这种情况下,需要手动删除被损坏的数据文件。
|
||||
|
||||
|
||||
[原始文档](https://clickhouse.tech/docs/en/operations/table_engines/join/) <!--hide-->
|
||||
|
@ -1,6 +1,6 @@
|
||||
# 记忆 {#memory}
|
||||
# 内存表 {#memory}
|
||||
|
||||
Memory 引擎以未压缩的形式将数据存储在 RAM 中。数据完全以读取时获得的形式存储。换句话说,从这张表中读取是很轻松的。并发数据访问是同步的。锁范围小:读写操作不会相互阻塞。不支持索引。阅读是并行化的。在简单查询上达到最大生产率(超过10 GB /秒),因为没有磁盘读取,不需要解压缩或反序列化数据。(值得注意的是,在许多情况下,与 MergeTree 引擎的性能几乎一样高)。重新启动服务器时,表中的数据消失,表将变为空。通常,使用此表引擎是不合理的。但是,它可用于测试,以及在相对较少的行(最多约100,000,000)上需要最高性能的查询。
|
||||
Memory 引擎以未压缩的形式将数据存储在 RAM 中。数据完全以读取时获得的形式存储。换句话说,从这张表中读取是很轻松的。并发数据访问是同步的。锁范围小:读写操作不会相互阻塞。不支持索引。查询是并行化的。在简单查询上达到最大速率(超过10 GB /秒),因为没有磁盘读取,不需要解压缩或反序列化数据。(值得注意的是,在许多情况下,与 MergeTree 引擎的性能几乎一样高)。重新启动服务器时,表中的数据消失,表将变为空。通常,使用此表引擎是不合理的。但是,它可用于测试,以及在相对较少的行(最多约100,000,000)上需要最高性能的查询。
|
||||
|
||||
Memory 引擎是由系统用于临时表进行外部数据的查询(请参阅 «外部数据用于请求处理» 部分),以及用于实现 `GLOBAL IN`(请参见 «IN 运算符» 部分)。
|
||||
|
||||
|
@ -91,5 +91,10 @@
|
||||
- Elixir
|
||||
- [Ecto](https://github.com/elixir-ecto/ecto)
|
||||
- [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
|
||||
- Ruby
|
||||
- [Ruby on Rails](https://rubyonrails.org/)
|
||||
- [activecube](https://github.com/bitquery/activecube)
|
||||
- [GraphQL](https://github.com/graphql)
|
||||
- [activecube-graphql](https://github.com/bitquery/activecube-graphql)
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/integrations/) <!--hide-->
|
||||
|
@ -33,7 +33,7 @@ ClickHouse 收集的指标项:
|
||||
- 服务用于计算的资源占用的各种指标。
|
||||
- 关于查询处理的常见统计信息。
|
||||
|
||||
可以在 [系统指标](system-tables.md#system_tables-metrics) ,[系统事件](system-tables.md#system_tables-events) 以及[系统异步指标](system-tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
|
||||
可以在 [系统指标](system-tables/metrics.md#system_tables-metrics) ,[系统事件](system-tables/events.md#system_tables-events) 以及[系统异步指标](system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
|
||||
|
||||
可以配置 ClickHouse 向 [Graphite](https://github.com/graphite-project) 导出指标。参考配置文件中的 [Graphite 部分](server-configuration-parameters/settings.md#server_configuration_parameters-graphite)。在配置指标导出之前,需要参考 Graphite [官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建服务。
|
||||
|
||||
|
@ -13,7 +13,7 @@ ClickHouse运行允许分析查询执行的采样探查器。 使用探查器,
|
||||
|
||||
- 设置 [trace\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) 服务器配置部分。
|
||||
|
||||
本节配置 [trace\_log](../../operations/system-tables.md#system_tables-trace_log) 系统表包含探查器运行的结果。 它是默认配置的。 请记住,此表中的数据仅对正在运行的服务器有效。 服务器重新启动后,ClickHouse不会清理表,所有存储的虚拟内存地址都可能无效。
|
||||
本节配置 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) 系统表包含探查器运行的结果。 它是默认配置的。 请记住,此表中的数据仅对正在运行的服务器有效。 服务器重新启动后,ClickHouse不会清理表,所有存储的虚拟内存地址都可能无效。
|
||||
|
||||
- 设置 [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) 或 [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) 设置。 这两种设置可以同时使用。
|
||||
|
||||
|
@ -145,10 +145,10 @@ ClickHouse每x秒重新加载内置字典。 这使得编辑字典 “on the fly
|
||||
- interval – The interval for sending, in seconds.
|
||||
- timeout – The timeout for sending data, in seconds.
|
||||
- root\_path – Prefix for keys.
|
||||
- metrics – Sending data from the [系统。指标](../../operations/system-tables.md#system_tables-metrics) 桌子
|
||||
- events – Sending deltas data accumulated for the time period from the [系统。活动](../../operations/system-tables.md#system_tables-events) 桌子
|
||||
- events\_cumulative – Sending cumulative data from the [系统。活动](../../operations/system-tables.md#system_tables-events) 桌子
|
||||
- asynchronous\_metrics – Sending data from the [系统。asynchronous\_metrics](../../operations/system-tables.md#system_tables-asynchronous_metrics) 桌子
|
||||
- metrics – 从 [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) 表发送数据。
|
||||
- events – 从 [系统。活动](../../operations/system-tables/events.md#system_tables-events) 表发送时间段内累积的增量数据。
|
||||
- events\_cumulative – 从 [系统。活动](../../operations/system-tables/events.md#system_tables-events) 表发送累积数据。
|
||||
- asynchronous\_metrics – 从 [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) 表发送数据。
|
||||
|
||||
您可以配置多个 `<graphite>` 条款 例如,您可以使用它以不同的时间间隔发送不同的数据。
|
||||
|
||||
@ -503,7 +503,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
记录与之关联的事件 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). 例如,添加或合并数据。 您可以使用日志来模拟合并算法并比较它们的特征。 您可以可视化合并过程。
|
||||
|
||||
查询记录在 [系统。part\_log](../../operations/system-tables.md#system_tables-part-log) 表,而不是在一个单独的文件。 您可以在以下命令中配置此表的名称 `table` 参数(见下文)。
|
||||
查询记录在 [系统。part\_log](../../operations/system-tables/part_log.md#system_tables-part-log) 表,而不是在一个单独的文件。 您可以在以下命令中配置此表的名称 `table` 参数(见下文)。
|
||||
|
||||
使用以下参数配置日志记录:
|
||||
|
||||
@ -540,7 +540,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
用于记录接收到的查询的设置 [log\_queries=1](../settings/settings.md) 设置。
|
||||
|
||||
查询记录在 [系统。query\_log](../../operations/system-tables.md#system_tables-query_log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
查询记录在 [系统。query\_log](../../operations/system-tables/query_log.md#system_tables-query_log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
|
||||
使用以下参数配置日志记录:
|
||||
|
||||
@ -566,7 +566,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
设置用于记录接收到的查询的线程 [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) 设置。
|
||||
|
||||
查询记录在 [系统。query\_thread\_log](../../operations/system-tables.md#system_tables-query-thread-log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
查询记录在 [系统。query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query-thread-log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
|
||||
使用以下参数配置日志记录:
|
||||
|
||||
@ -590,7 +590,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
## trace\_log {#server_configuration_parameters-trace_log}
|
||||
|
||||
设置为 [trace\_log](../../operations/system-tables.md#system_tables-trace_log) 系统表操作。
|
||||
设置为 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) 系统表操作。
|
||||
|
||||
参数:
|
||||
|
||||
|
@ -1165,7 +1165,7 @@ ClickHouse生成异常
|
||||
|
||||
另请参阅:
|
||||
|
||||
- 系统表 [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- 系统表 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns}
|
||||
|
||||
@ -1188,7 +1188,7 @@ ClickHouse生成异常
|
||||
|
||||
另请参阅:
|
||||
|
||||
- 系统表 [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- 系统表 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## allow\_introspection\_functions {#settings-allow_introspection_functions}
|
||||
|
||||
@ -1204,7 +1204,7 @@ ClickHouse生成异常
|
||||
**另请参阅**
|
||||
|
||||
- [采样查询探查器](../optimizing-performance/sampling-query-profiler.md)
|
||||
- 系统表 [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- 系统表 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## input\_format\_parallel\_parsing {#input-format-parallel-parsing}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -0,0 +1,8 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
## 系统。asynchronous\_metric\_log {#system-tables-async-log}
|
||||
|
||||
包含 `system.asynchronous_metrics` 的历史值(见 [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics))。
|
41
docs/zh/operations/system-tables/asynchronous_metrics.md
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。asynchronous\_metrics {#system_tables-asynchronous_metrics}
|
||||
|
||||
包含在后台定期计算的指标。 例如,在使用的RAM量。
|
||||
|
||||
列:
|
||||
|
||||
- `metric` ([字符串](../../sql-reference/data-types/string.md)) — Metric name.
|
||||
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.asynchronous_metrics LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─metric──────────────────────────────────┬──────value─┐
|
||||
│ jemalloc.background_thread.run_interval │ 0 │
|
||||
│ jemalloc.background_thread.num_runs │ 0 │
|
||||
│ jemalloc.background_thread.num_threads │ 0 │
|
||||
│ jemalloc.retained │ 422551552 │
|
||||
│ jemalloc.mapped │ 1682989056 │
|
||||
│ jemalloc.resident │ 1656446976 │
|
||||
│ jemalloc.metadata_thp │ 0 │
|
||||
│ jemalloc.metadata │ 10226856 │
|
||||
│ UncompressedCacheCells │ 0 │
|
||||
│ MarkCacheFiles │ 0 │
|
||||
└─────────────────────────────────────────┴────────────┘
|
||||
```
|
||||
|
||||
**另请参阅**
|
||||
|
||||
- [监测](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
||||
- [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
|
||||
- [系统。活动](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred.
|
||||
- [系统。metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
|
29
docs/zh/operations/system-tables/clusters.md
Normal file
@ -0,0 +1,29 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。集群 {#system-clusters}
|
||||
|
||||
包含有关配置文件中可用的集群及其中的服务器的信息。
|
||||
|
||||
列:
|
||||
|
||||
- `cluster` (String) — The cluster name.
|
||||
- `shard_num` (UInt32) — The shard number in the cluster, starting from 1.
|
||||
- `shard_weight` (UInt32) — The relative weight of the shard when writing data.
|
||||
- `replica_num` (UInt32) — The replica number in the shard, starting from 1.
|
||||
- `host_name` (String) — The host name, as specified in the config.
|
||||
- `host_address` (String) — The host IP address obtained from DNS.
|
||||
- `port` (UInt16) — The port to use for connecting to the server.
|
||||
- `user` (String) — The name of the user for connecting to the server.
|
||||
- `errors_count` (UInt32)-此主机无法到达副本的次数。
|
||||
- `estimated_recovery_time` (UInt32)-剩下的秒数,直到副本错误计数归零,它被认为是恢复正常。
|
||||
|
||||
请注意 `errors_count` 每个查询集群更新一次,但 `estimated_recovery_time` 按需重新计算。 所以有可能是非零的情况 `errors_count` 和零 `estimated_recovery_time`,下一个查询将为零 `errors_count` 并尝试使用副本,就好像它没有错误。
|
||||
|
||||
**另请参阅**
|
||||
|
||||
- [表引擎分布式](../../engines/table-engines/special/distributed.md)
|
||||
- [distributed\_replica\_error\_cap设置](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
|
||||
- [distributed\_replica\_error\_half\_life设置](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
|
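A minimal sketch of inspecting the configured clusters through this table:

``` sql
SELECT cluster, shard_num, replica_num, host_name, port, errors_count
FROM system.clusters
```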
27
docs/zh/operations/system-tables/columns.md
Normal file
@ -0,0 +1,27 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。列 {#system-columns}
|
||||
|
||||
包含有关所有表中列的信息。
|
||||
|
||||
您可以使用此表获取类似于以下内容的信息 [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) 查询,但对于多个表一次。
|
||||
|
||||
该 `system.columns` 表包含以下列(列类型显示在括号中):
|
||||
|
||||
- `database` (String) — Database name.
|
||||
- `table` (String) — Table name.
|
||||
- `name` (String) — Column name.
|
||||
- `type` (String) — Column type.
|
||||
- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`)为默认值,如果没有定义,则为空字符串。
|
||||
- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined.
|
||||
- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes.
|
||||
- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes.
|
||||
- `marks_bytes` (UInt64) — The size of marks, in bytes.
|
||||
- `comment` (String) — Comment on the column, or an empty string if it is not defined.
|
||||
- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression.
|
||||
- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
|
||||
- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
|
||||
- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
|
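A minimal illustrative query against this table (`visits` is an assumed table name, not from the document):

``` sql
SELECT name, type, default_kind, is_in_primary_key
FROM system.columns
WHERE database = currentDatabase() AND table = 'visits'
```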
45
docs/zh/operations/system-tables/contributors.md
Normal file
@ -0,0 +1,45 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。贡献者 {#system-contributors}
|
||||
|
||||
包含有关贡献者的信息。 该顺序在查询执行时是随机的。
|
||||
|
||||
列:
|
||||
|
||||
- `name` (String) — Contributor (author) name from git log.
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.contributors LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name─────────────┐
|
||||
│ Olga Khvostikova │
|
||||
│ Max Vetrov │
|
||||
│ LiuYangkuan │
|
||||
│ svladykin │
|
||||
│ zamulla │
|
||||
│ Šimon Podlipský │
|
||||
│ BayoNet │
|
||||
│ Ilya Khomutov │
|
||||
│ Amy Krishnevsky │
|
||||
│ Loud_Scream │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
要在表中找出自己,请使用查询:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.contributors WHERE name = 'Olga Khvostikova'
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name─────────────┐
|
||||
│ Olga Khvostikova │
|
||||
└──────────────────┘
|
||||
```
|
39
docs/zh/operations/system-tables/data_type_families.md
Normal file
@ -0,0 +1,39 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。data\_type\_families {#system_tables-data_type_families}
|
||||
|
||||
包含受支持的[数据类型](../../sql-reference/data-types/)的信息。
|
||||
|
||||
列:
|
||||
|
||||
- `name` ([字符串](../../sql-reference/data-types/string.md)) — Data type name.
|
||||
- `case_insensitive` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Property that shows whether you can use a data type name in a query in case insensitive manner or not. For example, `Date` 和 `date` 都是有效的。
|
||||
- `alias_to` ([字符串](../../sql-reference/data-types/string.md)) — `name` 是其别名的原始数据类型名称。
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.data_type_families WHERE alias_to = 'String'
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name───────┬─case_insensitive─┬─alias_to─┐
|
||||
│ LONGBLOB │ 1 │ String │
|
||||
│ LONGTEXT │ 1 │ String │
|
||||
│ TINYTEXT │ 1 │ String │
|
||||
│ TEXT │ 1 │ String │
|
||||
│ VARCHAR │ 1 │ String │
|
||||
│ MEDIUMBLOB │ 1 │ String │
|
||||
│ BLOB │ 1 │ String │
|
||||
│ TINYBLOB │ 1 │ String │
|
||||
│ CHAR │ 1 │ String │
|
||||
│ MEDIUMTEXT │ 1 │ String │
|
||||
└────────────┴──────────────────┴──────────┘
|
||||
```
|
||||
|
||||
**另请参阅**
|
||||
|
||||
- [语法](../../sql-reference/syntax.md) — Information about supported syntax.
|
12
docs/zh/operations/system-tables/databases.md
Normal file
@ -0,0 +1,12 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。数据库 {#system-databases}
|
||||
|
||||
此表包含一个 String 类型的列 `name`,即数据库的名称。
|
||||
|
||||
服务器知道的每个数据库在表中都有相应的条目。
|
||||
|
||||
该系统表用于实现 `SHOW DATABASES` 查询。
|
14
docs/zh/operations/system-tables/detached_parts.md
Normal file
@ -0,0 +1,14 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。detached\_parts {#system_tables-detached_parts}
|
||||
|
||||
包含 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表的分离数据部分(detached parts)的信息。`reason` 列说明该部分被分离的原因。
|
||||
|
||||
对于用户手动分离的部分,reason 为空。可以使用 [ALTER TABLE ATTACH PARTITION\|PART](../../sql-reference/statements/alter.md#alter_attach-partition) 命令重新附加这些部分。
|
||||
|
||||
有关其他列的说明,请参阅 [系统。零件](../../operations/system-tables/parts.md#system_tables-parts).
|
||||
|
||||
如果部件名称无效,某些列的值可能为 `NULL`. 这些部分可以删除 [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter.md#alter_drop-detached).
|
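A hedged sketch of the workflow described above (the table and part names are illustrative, not from the document):

``` sql
-- List detached parts and why they were detached.
SELECT table, name, reason FROM system.detached_parts;

-- Re-attach a user-detached part by name (illustrative identifiers).
ALTER TABLE visits ATTACH PART 'all_1_1_0';
```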
66
docs/zh/operations/system-tables/dictionaries.md
Normal file
@ -0,0 +1,66 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。字典 {#system_tables-dictionaries}
|
||||
|
||||
包含[外部字典](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)的信息。
|
||||
|
||||
列:
|
||||
|
||||
- `database` ([字符串](../../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries.
|
||||
- `name` ([字符串](../../sql-reference/data-types/string.md)) — [字典名称](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
|
||||
- `status` ([枚举8](../../sql-reference/data-types/enum.md)) — Dictionary status. Possible values:
|
||||
- `NOT_LOADED` — Dictionary was not loaded because it was not used.
|
||||
- `LOADED` — Dictionary loaded successfully.
|
||||
- `FAILED` — Unable to load the dictionary as a result of an error.
|
||||
- `LOADING` — Dictionary is loading now.
|
||||
- `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) 查询,超时,字典配置已更改)。
|
||||
- `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now.
|
||||
- `origin` ([字符串](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
|
||||
- `type` ([字符串](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [在内存中存储字典](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
|
||||
- `key` — [密钥类型](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key):数字键 ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) or Сomposite key ([字符串](../../sql-reference/data-types/string.md)) — form “(type 1, type 2, …, type n)”.
|
||||
- `attribute.names` ([阵列](../../sql-reference/data-types/array.md)([字符串](../../sql-reference/data-types/string.md))) — Array of [属性名称](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) 由字典提供。
|
||||
- `attribute.types` ([阵列](../../sql-reference/data-types/array.md)([字符串](../../sql-reference/data-types/string.md))) — Corresponding array of [属性类型](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) 这是由字典提供。
|
||||
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
|
||||
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
|
||||
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
|
||||
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
|
||||
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
|
||||
- `source` ([字符串](../../sql-reference/data-types/string.md)) — Text describing the [数据源](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) 为了字典
|
||||
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [使用寿命](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) 在内存中的字典,之后ClickHouse尝试重新加载字典(如果 `invalidate_query` 被设置,那么只有当它已经改变)。 在几秒钟内设置。
|
||||
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [使用寿命](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) 在内存中的字典,之后ClickHouse尝试重新加载字典(如果 `invalidate_query` 被设置,那么只有当它已经改变)。 在几秒钟内设置。
|
||||
- `loading_start_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary.
|
||||
- `last_successful_update_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
|
||||
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
|
||||
- `last_exception` ([字符串](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created.
|
||||
|
||||
**示例**
|
||||
|
||||
配置字典。
|
||||
|
||||
``` sql
|
||||
CREATE DICTIONARY dictdb.dict
|
||||
(
|
||||
`key` Int64 DEFAULT -1,
|
||||
`value_default` String DEFAULT 'world',
|
||||
`value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)'
|
||||
)
|
||||
PRIMARY KEY key
|
||||
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb'))
|
||||
LIFETIME(MIN 0 MAX 1)
|
||||
LAYOUT(FLAT())
|
||||
```
|
||||
|
||||
确保字典已加载。
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.dictionaries
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┬──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐
|
||||
│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │
|
||||
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┴───────────────────────┴────────────────┘
|
||||
```
|
31
docs/zh/operations/system-tables/disks.md
Normal file
@ -0,0 +1,31 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。磁盘 {#system_tables-disks}
|
||||
|
||||
包含在[服务器配置](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure)中定义的磁盘的信息。
|
||||
|
||||
列:
|
||||
|
||||
- `name` ([字符串](../../sql-reference/data-types/string.md)) — Name of a disk in the server configuration.
|
||||
- `path` ([字符串](../../sql-reference/data-types/string.md)) — Path to the mount point in the file system.
|
||||
- `free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Free space on disk in bytes.
|
||||
- `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes.
|
||||
- `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` 磁盘配置参数。
|
||||
|
||||
## 系统。storage\_policies {#system_tables-storage_policies}
|
||||
|
||||
包含在[服务器配置](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure)中定义的存储策略和卷的信息。
|
||||
|
||||
列:
|
||||
|
||||
- `policy_name` ([字符串](../../sql-reference/data-types/string.md)) — Name of the storage policy.
|
||||
- `volume_name` ([字符串](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
|
||||
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration.
|
||||
- `disks` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy.
|
||||
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
|
||||
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order.
|
||||
|
||||
如果存储策略包含多个卷,则每个卷的信息将存储在表的单独行中。
|
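An illustrative query over the disks table described here:

``` sql
SELECT name, path, formatReadableSize(free_space) AS free, formatReadableSize(total_space) AS total
FROM system.disks
```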
37
docs/zh/operations/system-tables/events.md
Normal file
@ -0,0 +1,37 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。活动 {#system_tables-events}
|
||||
|
||||
包含系统中已发生事件数量的信息。例如,您可以在表中查到自 ClickHouse 服务器启动以来已处理的 `SELECT` 查询数量。
|
||||
|
||||
列:
|
||||
|
||||
- `event` ([字符串](../../sql-reference/data-types/string.md)) — Event name.
|
||||
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of events occurred.
|
||||
- `description` ([字符串](../../sql-reference/data-types/string.md)) — Event description.
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.events LIMIT 5
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─event─────────────────────────────────┬─value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Query │ 12 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │
|
||||
│ SelectQuery │ 8 │ Same as Query, but only for SELECT queries. │
|
||||
│ FileOpen │ 73 │ Number of files opened. │
|
||||
│ ReadBufferFromFileDescriptorRead │ 155 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. │
|
||||
│ ReadBufferFromFileDescriptorReadBytes │ 9931 │ Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. │
|
||||
└───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**另请参阅**
|
||||
|
||||
- [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
|
||||
- [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
|
||||
- [系统。metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
|
||||
- [监测](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
13
docs/zh/operations/system-tables/functions.md
Normal file
@ -0,0 +1,13 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。功能 {#system-functions}
|
||||
|
||||
包含有关正常函数和聚合函数的信息。
|
||||
|
||||
列:
|
||||
|
||||
- `name`(`String`) – The name of the function.
|
||||
- `is_aggregate`(`UInt8`) — Whether the function is aggregate.
|
20
docs/zh/operations/system-tables/graphite_retentions.md
Normal file
@ -0,0 +1,20 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。graphite\_retentions {#system-graphite-retentions}
|
||||
|
||||
包含 [graphite\_rollup](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-graphite) 参数的信息,这些参数用于使用 [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md) 引擎的表。
|
||||
|
||||
列:
|
||||
|
||||
- `config_name` (字符串) - `graphite_rollup` 参数名称。
|
||||
- `regexp` (String)-指标名称的模式。
|
||||
- `function` (String)-聚合函数的名称。
|
||||
- `age` (UInt64)-以秒为单位的数据的最小期限。
|
||||
- `precision` (UInt64)-如何精确地定义以秒为单位的数据的年龄。
|
||||
- `priority` (UInt16)-模式优先级。
|
||||
- `is_default` (UInt8)-模式是否为默认值。
|
||||
- `Tables.database` (Array(String))-使用数据库表名称的数组 `config_name` 参数。
|
||||
- `Tables.table` (Array(String))-使用表名称的数组 `config_name` 参数。
|
50
docs/zh/operations/system-tables/index.md
Normal file
@ -0,0 +1,50 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
toc_priority: 52
|
||||
toc_title: "\u7CFB\u7EDF\u8868"
|
||||
---
|
||||
|
||||
# 系统表 {#system-tables}
|
||||
|
||||
## 导言 {#system-tables-introduction}
|
||||
|
||||
系统表提供以下信息:
|
||||
|
||||
- 服务器状态、进程和环境。
|
||||
- 服务器的内部进程。
|
||||
|
||||
系统表:
|
||||
|
||||
- 坐落于 `system` 数据库。
|
||||
- 仅适用于读取数据。
|
||||
- 不能删除或更改,但可以分离。
|
||||
|
||||
大多数系统表将数据存储在RAM中。 ClickHouse服务器在开始时创建此类系统表。
|
||||
|
||||
与其他系统表不同,系统表 [metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log)、[query\_log](../../operations/system-tables/query_log.md#system_tables-query_log)、[query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log)、[trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) 由 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表引擎提供支持,并将其数据存储在文件系统中。如果从文件系统中删除表,ClickHouse 服务器会在下一次写入数据时再次创建空表。如果系统表结构在新版本中发生更改,则 ClickHouse 会重命名当前表并创建一个新表。
|
||||
|
||||
默认情况下,表会无限增长。要控制表的大小,可以使用 [TTL](../../sql-reference/statements/alter.md#manipulations-with-table-ttl) 设置删除过期的日志记录。你也可以使用 `MergeTree` 引擎表的分区功能。
|
||||
|
||||
## 系统指标的来源 {#system-tables-sources-of-system-metrics}
|
||||
|
||||
用于收集ClickHouse服务器使用的系统指标:
|
||||
|
||||
- `CAP_NET_ADMIN` 能力。
|
||||
- [procfs](https://en.wikipedia.org/wiki/Procfs) (仅在Linux中)。
|
||||
|
||||
**procfs**
|
||||
|
||||
如果 ClickHouse 服务器没有 `CAP_NET_ADMIN` 能力,它会尝试回退到 `ProcfsMetricsProvider`。`ProcfsMetricsProvider` 允许收集每个查询的系统指标(用于 CPU 和 I/O)。
|
||||
|
||||
如果系统上支持并启用procfs,ClickHouse server将收集这些指标:
|
||||
|
||||
- `OSCPUVirtualTimeMicroseconds`
|
||||
- `OSCPUWaitMicroseconds`
|
||||
- `OSIOWaitMicroseconds`
|
||||
- `OSReadChars`
|
||||
- `OSWriteChars`
|
||||
- `OSReadBytes`
|
||||
- `OSWriteBytes`
|
||||
|
||||
[原始文章](https://clickhouse.tech/docs/en/operations/system-tables/) <!--hide-->
|
16
docs/zh/operations/system-tables/merge_tree_settings.md
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。merge\_tree\_settings {#system-merge_tree_settings}
|
||||
|
||||
包含 `MergeTree` 表的设置信息。
|
||||
|
||||
列:
|
||||
|
||||
- `name` (String) — Setting name.
|
||||
- `value` (String) — Setting value.
|
||||
- `description` (String) — Setting description.
|
||||
- `type` (String) — Setting type (implementation specific string value).
|
||||
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
|
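A small illustrative query showing only the settings changed from their defaults:

``` sql
SELECT name, value, description
FROM system.merge_tree_settings
WHERE changed
```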
24
docs/zh/operations/system-tables/merges.md
Normal file
@ -0,0 +1,24 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。合并 {#system-merges}
|
||||
|
||||
包含有关MergeTree系列中表当前正在进行的合并和部件突变的信息。
|
||||
|
||||
列:
|
||||
|
||||
- `database` (String) — The name of the database the table is in.
|
||||
- `table` (String) — Table name.
|
||||
- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started.
|
||||
- `progress` (Float64) — The percentage of completed work from 0 to 1.
|
||||
- `num_parts` (UInt64) — The number of pieces to be merged.
|
||||
- `result_part_name` (String) — The name of the part that will be formed as the result of merging.
|
||||
- `is_mutation` (UInt8)-1如果这个过程是一个部分突变.
|
||||
- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks.
|
||||
- `total_size_marks` (UInt64) — The total number of marks in the merged parts.
|
||||
- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed.
|
||||
- `rows_read` (UInt64) — Number of rows read.
|
||||
- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed.
|
||||
- `rows_written` (UInt64) — Number of rows written.
|
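An illustrative query for watching merge progress through this table:

``` sql
SELECT database, table, round(elapsed, 1) AS elapsed_s, round(progress, 2) AS progress, num_parts
FROM system.merges
```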
60
docs/zh/operations/system-tables/metric_log.md
Normal file
@ -0,0 +1,60 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。metric\_log {#system_tables-metric_log}
|
||||
|
||||
包含表中度量值的历史记录 `system.metrics` 和 `system.events`,定期刷新到磁盘。
|
||||
要开启 `system.metric_log` 的指标历史记录收集,请创建 `/etc/clickhouse-server/config.d/metric_log.xml`,内容如下:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<metric_log>
|
||||
<database>system</database>
|
||||
<table>metric_log</table>
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
|
||||
</metric_log>
|
||||
</yandex>
|
||||
```
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical;
|
||||
```
|
||||
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
event_date: 2020-02-18
|
||||
event_time: 2020-02-18 07:15:33
|
||||
milliseconds: 554
|
||||
ProfileEvent_Query: 0
|
||||
ProfileEvent_SelectQuery: 0
|
||||
ProfileEvent_InsertQuery: 0
|
||||
ProfileEvent_FileOpen: 0
|
||||
ProfileEvent_Seek: 0
|
||||
ProfileEvent_ReadBufferFromFileDescriptorRead: 1
|
||||
ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0
|
||||
ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0
|
||||
ProfileEvent_WriteBufferFromFileDescriptorWrite: 1
|
||||
ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0
|
||||
ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56
|
||||
...
|
||||
CurrentMetric_Query: 0
|
||||
CurrentMetric_Merge: 0
|
||||
CurrentMetric_PartMutation: 0
|
||||
CurrentMetric_ReplicatedFetch: 0
|
||||
CurrentMetric_ReplicatedSend: 0
|
||||
CurrentMetric_ReplicatedChecks: 0
|
||||
...
|
||||
```
|
||||
|
||||
**另请参阅**
|
||||
|
||||
- [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
|
||||
- [系统。活动](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
|
||||
- [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
|
||||
- [监测](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
44
docs/zh/operations/system-tables/metrics.md
Normal file
@ -0,0 +1,44 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。指标 {#system_tables-metrics}
|
||||
|
||||
包含可以立即计算或具有当前值的指标。 例如,同时处理的查询的数量或当前副本的延迟。 此表始终是最新的。
|
||||
|
||||
列:
|
||||
|
||||
- `metric` ([字符串](../../sql-reference/data-types/string.md)) — Metric name.
|
||||
- `value` ([Int64](../../sql-reference/data-types/int-uint.md)) — Metric value.
|
||||
- `description` ([字符串](../../sql-reference/data-types/string.md)) — Metric description.
|
||||
|
||||
支持的指标列表,您可以在 [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) ClickHouse的源文件。
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.metrics LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─metric─────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Query │ 1 │ Number of executing queries │
|
||||
│ Merge │ 0 │ Number of executing background merges │
|
||||
│ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │
|
||||
│ ReplicatedFetch │ 0 │ Number of data parts being fetched from replicas │
|
||||
│ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │
|
||||
│ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │
|
||||
│ BackgroundPoolTask │ 0 │ Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping) │
|
||||
│ BackgroundSchedulePoolTask │ 0 │ Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc. │
|
||||
│ DiskSpaceReservedForMerge │ 0 │ Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts. │
|
||||
│ DistributedSend │ 0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode. │
|
||||
└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**另请参阅**
|
||||
|
||||
- [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
|
||||
- [系统。活动](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
|
||||
- [系统。metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
|
||||
- [监测](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
30
docs/zh/operations/system-tables/mutations.md
Normal file
@ -0,0 +1,30 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。突变 {#system_tables-mutations}
|
||||
|
||||
该表包含 MergeTree 表的[突变](../../sql-reference/statements/alter.md#alter-mutations)及其进度的信息。每个突变命令由一行表示。该表具有以下列:
|
||||
|
||||
**database**, **table** - 应用突变的数据库和表的名称。
|
||||
|
||||
**mutation\_id** - 突变的 ID。对于复制表,这些 ID 对应于 ZooKeeper 中 `<table_path_in_zookeeper>/mutations/` 目录下 znode 的名称。对于非复制表,ID 对应于表数据目录中的文件名。
|
||||
|
||||
**command** - 突变命令字符串(查询中 `ALTER TABLE [db.]table` 之后的部分)。
|
||||
|
||||
**create\_time** -当这个突变命令被提交执行。
|
||||
|
||||
**block\_numbers.partition\_id**, **block\_numbers.number** - 嵌套列。对于复制表的突变,它包含每个分区的一条记录:分区 ID 和突变获取的块编号(在每个分区中,只有块编号小于该分区中突变所获取编号的块才会被突变)。在非复制表中,所有分区的块编号构成一个序列,因此该列只包含一条记录,即突变获取的单个块编号。
|
||||
|
||||
**parts\_to\_do** -为了完成突变,需要突变的数据部分的数量。
|
||||
|
||||
**is\_done** - 突变是否已完成。请注意,即使 `parts_to_do = 0`,复制表的突变也可能尚未完成,因为长时间运行的 INSERT 会创建需要突变的新数据部分。
|
||||
|
||||
如果在改变某些部分时出现问题,以下列将包含其他信息:
|
||||
|
||||
**latest\_failed\_part** -不能变异的最新部分的名称。
|
||||
|
||||
**latest\_fail\_time** -最近的部分突变失败的时间。
|
||||
|
||||
**latest\_fail\_reason** -导致最近部件变异失败的异常消息。
|
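An illustrative query for the unfinished mutations described above:

``` sql
SELECT database, table, mutation_id, command, parts_to_do, latest_fail_reason
FROM system.mutations
WHERE is_done = 0
```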
12
docs/zh/operations/system-tables/numbers.md
Normal file
@ -0,0 +1,12 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。数字 {#system-numbers}
|
||||
|
||||
此表包含一个 UInt64 类型的列 `number`,其中包含几乎所有从零开始的自然数。
|
||||
|
||||
您可以使用此表进行测试,或者如果您需要进行暴力搜索。
|
||||
|
||||
从此表中读取的内容不是并行的。
|
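A minimal example of using it as a row generator:

``` sql
SELECT number FROM system.numbers LIMIT 5
```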
10
docs/zh/operations/system-tables/numbers_mt.md
Normal file
@ -0,0 +1,10 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。numbers\_mt {#system-numbers-mt}
|
||||
|
||||
一样的 [系统。数字](../../operations/system-tables/numbers.md) 但读取是并行的。 这些数字可以以任何顺序返回。
|
||||
|
||||
用于测试。
|
12
docs/zh/operations/system-tables/one.md
Normal file
@ -0,0 +1,12 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。一 {#system-one}
|
||||
|
||||
此表只包含一行:一个值为 0 的 `dummy` UInt8 列。
|
||||
|
||||
当 `SELECT` 查询未指定 `FROM` 子句时,就会使用此表。
|
||||
|
||||
这类似于其他 DBMS 中的 `DUAL` 表。
|
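A minimal illustration; a bare `SELECT` with no `FROM` implicitly reads from this table:

``` sql
SELECT * FROM system.one;   -- returns one row: dummy = 0
SELECT 1;                   -- implicitly reads from system.one
```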
37
docs/zh/operations/system-tables/part_log.md
Normal file
@ -0,0 +1,37 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。part\_log {#system_tables-part-log}
|
||||
|
||||
该 `system.part_log` 表只有当创建 [part\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-part-log) 指定了服务器设置。
|
||||
|
||||
此表包含与以下情况发生的事件有关的信息 [数据部分](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) 在 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 家庭表,例如添加或合并数据。
|
||||
|
||||
该 `system.part_log` 表包含以下列:
|
||||
|
||||
- `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values:
|
||||
- `NEW_PART` — Inserting of a new data part.
|
||||
- `MERGE_PARTS` — Merging of data parts.
|
||||
- `DOWNLOAD_PART` — Downloading a data part.
|
||||
- `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../../sql-reference/statements/alter.md#alter_detach-partition).
|
||||
- `MUTATE_PART` — Mutating of a data part.
|
||||
- `MOVE_PART` — Moving the data part from the one disk to another one.
|
||||
- `event_date` (Date) — Event date.
|
||||
- `event_time` (DateTime) — Event time.
|
||||
- `duration_ms` (UInt64) — Duration.
|
||||
- `database` (String) — Name of the database the data part is in.
|
||||
- `table` (String) — Name of the table the data part is in.
|
||||
- `part_name` (String) — Name of the data part.
|
||||
- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ 值,如果分区是由 `tuple()`.
|
||||
- `rows` (UInt64) — The number of rows in the data part.
|
||||
- `size_in_bytes` (UInt64) — Size of the data part in bytes.
|
||||
- `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge).
|
||||
- `bytes_uncompressed` (UInt64) — Size of uncompressed bytes.
|
||||
- `read_rows` (UInt64) — The number of rows was read during the merge.
|
||||
- `read_bytes` (UInt64) — The number of bytes was read during the merge.
|
||||
- `error` (UInt16) — The code number of the occurred error.
|
||||
- `exception` (String) — Text message of the occurred error.
|
||||
|
||||
该 `system.part_log` 表的第一个插入数据到后创建 `MergeTree` 桌子
|
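
As a sketch (assuming the `part_log` server setting is enabled), the following summarizes today's merges per table, using only the columns documented above:

``` sql
SELECT table, count() AS merges, avg(duration_ms) AS avg_duration_ms
FROM system.part_log
WHERE event_type = 'MERGE_PARTS' AND event_date = today()
GROUP BY table
ORDER BY merges DESC
```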
85
docs/zh/operations/system-tables/parts.md
Normal file
@ -0,0 +1,85 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.parts {#system_tables-parts}

Contains information about parts of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables.

Each row describes one data part.

Columns:

- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter.md#query_language_queries_alter) query.

    Formats:

    - `YYYYMM` for automatic partitioning by month.
    - `any_string` when partitioning manually.

- `name` (`String`) – Name of the data part.
- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging.
- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn't work for adaptive granularity).
- `rows` (`UInt64`) – The number of rows.
- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes.
- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `marks_bytes` (`UInt64`) – The size of the file with marks.
- `modification_time` (`DateTime`) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.
- `remove_time` (`DateTime`) – The time when the data part became inactive.
- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
- `min_date` (`Date`) – The minimum value of the date key in the data part.
- `max_date` (`Date`) – The maximum value of the date key in the data part.
- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part.
- `max_time` (`DateTime`) – The maximum value of the date and time key in the data part.
- `partition_id` (`String`) – ID of the partition.
- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging.
- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging.
- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts.
- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`).
- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values.
- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values.
- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter.md#alter_freeze-partition)
- `database` (`String`) – Name of the database.
- `table` (`String`) – Name of the table.
- `engine` (`String`) – Name of the table engine without parameters.
- `path` (`String`) – Absolute path to the folder with data part files.
- `disk` (`String`) – Name of the disk that stores the data part.
- `hash_of_all_files` (`String`) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
- `hash_of_uncompressed_files` (`String`) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.).
- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
- `bytes` (`UInt64`) – Alias for `bytes_on_disk`.
- `marks_size` (`UInt64`) – Alias for `marks_bytes`.
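
A common sketch over this table aggregates the on-disk size of active parts per table (all columns referenced here are documented above):

``` sql
SELECT database, table, count() AS parts, formatReadableSize(sum(bytes_on_disk)) AS size_on_disk
FROM system.parts
WHERE active
GROUP BY database, table
ORDER BY sum(bytes_on_disk) DESC
```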
20
docs/zh/operations/system-tables/processes.md
Normal file
@ -0,0 +1,20 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.processes {#system_tables-processes}

This system table is used for implementing the `SHOW PROCESSLIST` query.

Columns:

- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` user. The field contains the username for a specific query, not for a query that this query initiated.
- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` on the query requestor server.
- `elapsed` (Float64) – The time in seconds since request execution started.
- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known.
- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage) setting.
- `query` (String) – The query text. For `INSERT`, it doesn't include the data to insert.
- `query_id` (String) – Query ID, if defined.
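
A minimal sketch of inspecting the longest-running queries through this table:

``` sql
SELECT query_id, user, elapsed, memory_usage, query
FROM system.processes
ORDER BY elapsed DESC
```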
143
docs/zh/operations/system-tables/query_log.md
Normal file
@ -0,0 +1,143 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.query\_log {#system_tables-query_log}

Contains information about executed queries, for example, start time, duration of processing, error messages.

!!! note "Note"
    This table doesn't contain the ingested data for `INSERT` queries.

You can change settings of queries logging in the [query\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.

You can disable queries logging by setting [log\_queries=0](../../operations/settings/settings.md#settings-log-queries). We don't recommend turning logging off because information in this table is important for solving issues.

The flushing period of data is set in the `flush_interval_milliseconds` parameter of the [query\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.

ClickHouse doesn't delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details.

The `system.query_log` table registers two kinds of queries:

1. Initial queries that were run directly by the client.
2. Child queries that were initiated by other queries (for distributed query execution). For these kinds of queries, information about the parent queries is shown in the `initial_*` columns.

Each query creates one or two rows in the `query_log` table, depending on the status (see the `type` column) of the query:

1. If the query execution was successful, two rows with the `QueryStart` and `QueryFinish` types are created.
2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created.
3. If an error occurred before launching the query, a single event with the `ExceptionBeforeStart` type is created.

Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values:
    - `'QueryStart' = 1` — Successful start of query execution.
    - `'QueryFinish' = 2` — Successful end of query execution.
    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Query starting date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Query starting time.
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participating in the query. It includes the usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends its `read_rows` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participating in the query. It includes the usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its `read_bytes` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
- `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in a result of the `SELECT` query, or the number of rows in the `INSERT` query.
- `result_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM volume in bytes used to store a query result.
- `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
- `query` ([String](../../sql-reference/data-types/string.md)) — Query string.
- `exception` ([String](../../sql-reference/data-types/string.md)) — Exception message.
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — Code of an exception.
- `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string if the query was completed successfully.
- `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Query type. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query as part of distributed query execution.
- `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
- `os_user` ([String](../../sql-reference/data-types/string.md)) — Operating system username who runs the [command-line client](../../interfaces/cli.md).
- `client_hostname` ([String](../../sql-reference/data-types/string.md)) — Hostname of the client machine where the [command-line client](../../interfaces/cli.md) or another TCP client is run.
- `client_name` ([String](../../sql-reference/data-types/string.md)) — The [command-line client](../../interfaces/cli.md) or another TCP client name.
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Revision of the [command-line client](../../interfaces/cli.md) or another TCP client.
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Major version of the [command-line client](../../interfaces/cli.md) or another TCP client.
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Minor version of the [command-line client](../../interfaces/cli.md) or another TCP client.
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Patch component of the [command-line client](../../interfaces/cli.md) or another TCP client version.
- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
    - 1 — The `GET` method was used.
    - 2 — The `POST` method was used.
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `thread_numbers` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — Numbers of threads that are participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.

**Example**

``` sql
SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
type: QueryStart
event_date: 2020-05-13
event_time: 2020-05-13 14:02:28
query_start_time: 2020-05-13 14:02:28
query_duration_ms: 0
read_rows: 0
read_bytes: 0
written_rows: 0
written_bytes: 0
result_rows: 0
result_bytes: 0
memory_usage: 0
query: SELECT 1
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
address: ::ffff:127.0.0.1
port: 57720
initial_user: default
initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
initial_address: ::ffff:127.0.0.1
initial_port: 57720
interface: 1
os_user: bayonet
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse client
client_revision: 54434
client_version_major: 20
client_version_minor: 4
client_version_patch: 1
http_method: 0
http_user_agent:
quota_key:
revision: 54434
thread_ids: []
ProfileEvents.Names: []
ProfileEvents.Values: []
Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values: ['0','random','1','10000000000']
```
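
Beyond the row dump above, a typical diagnostic sketch over this table (restricted to columns documented here) finds today's slowest finished queries:

``` sql
SELECT query_duration_ms, read_rows, memory_usage, query
FROM system.query_log
WHERE type = 'QueryFinish' AND event_date = today()
ORDER BY query_duration_ms DESC
LIMIT 5
```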

**See Also**

- [system.query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.
118
docs/zh/operations/system-tables/query_thread_log.md
Normal file
@ -0,0 +1,118 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.query\_thread\_log {#system_tables-query_thread_log}

Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing.

To start logging:

1. Configure parameters in the [query\_thread\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) section.
2. Set [log\_query\_threads](../../operations/settings/settings.md#settings-log-query-threads) to 1.

The flushing period of data is set in the `flush_interval_milliseconds` parameter of the [query\_thread\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.

ClickHouse doesn't delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details.

Columns:

- `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the thread has finished execution of the query.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query.
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of read rows.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of read bytes.
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
- `memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The difference between the amount of allocated and freed memory in the context of this thread.
- `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The maximum difference between the amount of allocated and freed memory in the context of this thread.
- `thread_name` ([String](../../sql-reference/data-types/string.md)) — Name of the thread.
- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Internal thread ID.
- `thread_id` ([Int32](../../sql-reference/data-types/int-uint.md)) — Thread ID.
- `master_thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — OS initial ID of the initial thread.
- `query` ([String](../../sql-reference/data-types/string.md)) — Query string.
- `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Query type. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query for distributed query execution.
- `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the query.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the parent query.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
- `os_user` ([String](../../sql-reference/data-types/string.md)) — OS's username who runs the [command-line client](../../interfaces/cli.md).
- `client_hostname` ([String](../../sql-reference/data-types/string.md)) — Hostname of the client machine where the [command-line client](../../interfaces/cli.md) or another TCP client is run.
- `client_name` ([String](../../sql-reference/data-types/string.md)) — The [command-line client](../../interfaces/cli.md) or another TCP client name.
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Revision of the [command-line client](../../interfaces/cli.md) or another TCP client.
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Major version of the [command-line client](../../interfaces/cli.md) or another TCP client.
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Minor version of the [command-line client](../../interfaces/cli.md) or another TCP client.
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Patch component of the [command-line client](../../interfaces/cli.md) or another TCP client version.
- `http_method` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
    - 1 — The `GET` method was used.
    - 2 — The `POST` method was used.
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.

**Example**

``` sql
SELECT * FROM system.query_thread_log LIMIT 1 FORMAT Vertical
```

``` text
Row 1:
──────
event_date: 2020-05-13
event_time: 2020-05-13 14:02:28
query_start_time: 2020-05-13 14:02:28
query_duration_ms: 0
read_rows: 1
read_bytes: 1
written_rows: 0
written_bytes: 0
memory_usage: 0
peak_memory_usage: 0
thread_name: QueryPipelineEx
thread_id: 28952
master_thread_id: 28924
query: SELECT 1
is_initial_query: 1
user: default
query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
address: ::ffff:127.0.0.1
port: 57720
initial_user: default
initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
initial_address: ::ffff:127.0.0.1
initial_port: 57720
interface: 1
os_user: bayonet
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse client
client_revision: 54434
client_version_major: 20
client_version_minor: 4
client_version_patch: 1
http_method: 0
http_user_agent:
quota_key:
revision: 54434
ProfileEvents.Names: ['ContextLock','RealTimeMicroseconds','UserTimeMicroseconds','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds']
ProfileEvents.Values: [1,97,81,5,81]
...
```

**See Also**

- [system.query\_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution.
|
126
docs/zh/operations/system-tables/replicas.md
Normal file
126
docs/zh/operations/system-tables/replicas.md
Normal file
@ -0,0 +1,126 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 系统。副本 {#system_tables-replicas}
|
||||
|
||||
包含驻留在本地服务器上的复制表的信息和状态。
|
||||
此表可用于监视。 该表对于每个已复制的\*表都包含一行。
|
||||
|
||||
示例:
|
||||
|
||||
``` sql
|
||||
SELECT *
|
||||
FROM system.replicas
|
||||
WHERE table = 'visits'
|
||||
FORMAT Vertical
|
||||
```
|
||||
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
database: merge
|
||||
table: visits
|
||||
engine: ReplicatedCollapsingMergeTree
|
||||
is_leader: 1
|
||||
can_become_leader: 1
|
||||
is_readonly: 0
|
||||
is_session_expired: 0
|
||||
future_parts: 1
|
||||
parts_to_check: 0
|
||||
zookeeper_path: /clickhouse/tables/01-06/visits
|
||||
replica_name: example01-06-1.yandex.ru
|
||||
replica_path: /clickhouse/tables/01-06/visits/replicas/example01-06-1.yandex.ru
|
||||
columns_version: 9
|
||||
queue_size: 1
|
||||
inserts_in_queue: 0
|
||||
merges_in_queue: 1
|
||||
part_mutations_in_queue: 0
|
||||
queue_oldest_time: 2020-02-20 08:34:30
|
||||
inserts_oldest_time: 0000-00-00 00:00:00
|
||||
merges_oldest_time: 2020-02-20 08:34:30
|
||||
part_mutations_oldest_time: 0000-00-00 00:00:00
|
||||
oldest_part_to_get:
|
||||
oldest_part_to_merge_to: 20200220_20284_20840_7
|
||||
oldest_part_to_mutate_to:
|
||||
log_max_index: 596273
|
||||
log_pointer: 596274
|
||||
last_queue_update: 2020-02-20 08:34:32
|
||||
absolute_delay: 0
|
||||
total_replicas: 2
|
||||
active_replicas: 2
|
||||
```
|
||||
|
||||
列:
|
||||
|
||||
- `database` (`String`)-数据库名称
|
||||
- `table` (`String`)-表名
|
||||
- `engine` (`String`)-表引擎名称
|
||||
- `is_leader` (`UInt8`)-副本是否是领导者。
|
||||
一次只有一个副本可以成为领导者。 领导者负责选择要执行的后台合并。
|
||||
请注意,可以对任何可用且在ZK中具有会话的副本执行写操作,而不管该副本是否为leader。
|
||||
- `can_become_leader` (`UInt8`)-副本是否可以当选为领导者。
|
||||
- `is_readonly` (`UInt8`)-副本是否处于只读模式。
|
||||
如果配置没有ZooKeeper的部分,如果在ZooKeeper中重新初始化会话时发生未知错误,以及在ZooKeeper中重新初始化会话时发生未知错误,则此模式将打开。
|
||||
- `is_session_expired` (`UInt8`)-与ZooKeeper的会话已经过期。 基本上一样 `is_readonly`.
|
||||
- `future_parts` (`UInt32`)-由于尚未完成的插入或合并而显示的数据部分的数量。
|
||||
- `parts_to_check` (`UInt32`)-队列中用于验证的数据部分的数量。 如果怀疑零件可能已损坏,则将其放入验证队列。
|
||||
- `zookeeper_path` (`String`)-在ZooKeeper中的表数据路径。
|
||||
- `replica_name` (`String`)-在动物园管理员副本名称. 同一表的不同副本具有不同的名称。
|
||||
- `replica_path` (`String`)-在ZooKeeper中的副本数据的路径。 与连接相同 ‘zookeeper\_path/replicas/replica\_path’.
|
||||
- `columns_version` (`Int32`)-表结构的版本号。 指示执行ALTER的次数。 如果副本有不同的版本,这意味着一些副本还没有做出所有的改变。
|
||||
- `queue_size` (`UInt32`)-等待执行的操作的队列大小。 操作包括插入数据块、合并和某些其他操作。 它通常与 `future_parts`.
|
||||
- `inserts_in_queue` (`UInt32`)-需要插入数据块的数量。 插入通常复制得相当快。 如果这个数字很大,这意味着有什么不对劲。
|
||||
- `merges_in_queue` (`UInt32`)-等待进行合并的数量。 有时合并时间很长,因此此值可能长时间大于零。
|
||||
- `part_mutations_in_queue` (`UInt32`)-等待进行的突变的数量。
|
||||
- `queue_oldest_time` (`DateTime`)-如果 `queue_size` 大于0,显示何时将最旧的操作添加到队列中。
|
||||
- `inserts_oldest_time` (`DateTime`)-看 `queue_oldest_time`
|
||||
- `merges_oldest_time` (`DateTime`)-看 `queue_oldest_time`
|
||||
- `part_mutations_oldest_time` (`DateTime`)-看 `queue_oldest_time`
|
||||
|
||||
接下来的4列只有在有ZK活动会话的情况下才具有非零值。
|
||||
|
||||
- `log_max_index` (`UInt64`)-一般活动日志中的最大条目数。
|
||||
- `log_pointer` (`UInt64`)-副本复制到其执行队列的常规活动日志中的最大条目数加一。 如果 `log_pointer` 比 `log_max_index`,有点不对劲。
|
||||
- `last_queue_update` (`DateTime`)-上次更新队列时。
|
||||
- `absolute_delay` (`UInt64`)-当前副本有多大滞后秒。
|
||||
- `total_replicas` (`UInt8`)-此表的已知副本总数。
|
||||
- `active_replicas` (`UInt8`)-在ZooKeeper中具有会话的此表的副本的数量(即正常运行的副本的数量)。
|
||||
|
||||
如果您请求所有列,表可能会工作得有点慢,因为每行都会从ZooKeeper进行几次读取。
|
||||
如果您没有请求最后4列(log\_max\_index,log\_pointer,total\_replicas,active\_replicas),表工作得很快。
|
||||
|
||||
例如,您可以检查一切是否正常工作,如下所示:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
database,
|
||||
table,
|
||||
is_leader,
|
||||
is_readonly,
|
||||
is_session_expired,
|
||||
future_parts,
|
||||
parts_to_check,
|
||||
columns_version,
|
||||
queue_size,
|
||||
inserts_in_queue,
|
||||
merges_in_queue,
|
||||
log_max_index,
|
||||
log_pointer,
|
||||
total_replicas,
|
||||
active_replicas
|
||||
FROM system.replicas
|
||||
WHERE
|
||||
is_readonly
|
||||
OR is_session_expired
|
||||
OR future_parts > 20
|
||||
OR parts_to_check > 10
|
||||
OR queue_size > 20
|
||||
OR inserts_in_queue > 10
|
||||
OR log_max_index - log_pointer > 10
|
||||
OR total_replicas < 2
|
||||
OR active_replicas < total_replicas
|
||||
```
|
||||
|
||||
如果这个查询没有返回任何东西,这意味着一切都很好。
|
55
docs/zh/operations/system-tables/settings.md
Normal file
@ -0,0 +1,55 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.settings {#system-tables-system-settings}

Contains information about session settings for the current user.

Columns:

- `name` ([String](../../sql-reference/data-types/string.md)) — Setting name.
- `value` ([String](../../sql-reference/data-types/string.md)) — Setting value.
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is changed from its default value.
- `description` ([String](../../sql-reference/data-types/string.md)) — Short setting description.
- `min` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Minimum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
- `max` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Maximum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
- `readonly` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether the current user can change the setting:
    - `0` — Current user can change the setting.
    - `1` — Current user can't change the setting.

**Example**

The following example shows how to get information about settings whose name contains `min_i`.

``` sql
SELECT *
FROM system.settings
WHERE name LIKE '%min_i%'
```

``` text
┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
│ min_insert_block_size_rows                  │ 1048576   │       0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.                                                                          │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ min_insert_block_size_bytes                 │ 268435456 │       0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.                                                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ read_backoff_min_interval_between_events_ms │ 1000      │       0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time.  │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
└─────────────────────────────────────────────┴───────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
```

Using of `WHERE changed` can be useful, for example, when you want to check:

- Whether settings in configuration files are loaded correctly and are in use.
- Settings that changed in the current session.

<!-- -->

``` sql
SELECT * FROM system.settings WHERE changed AND name='load_balancing'
```

**See Also**

- [Settings](../../operations/settings/index.md#session-settings-intro)
- [Permissions for Queries](../../operations/settings/permissions-for-queries.md#settings_readonly)
- [Constraints on Settings](../../operations/settings/constraints-on-settings.md)
19
docs/zh/operations/system-tables/storage_policies.md
Normal file
@ -0,0 +1,19 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.storage\_policies {#system_tables-storage_policies}

Contains information about storage policies and volumes defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).

Columns:

- `policy_name` ([String](../../sql-reference/data-types/string.md)) — Name of the storage policy.
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration.
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy.
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of the configuration parameter, ClickHouse starts to move data to the next volume in order.

If the storage policy contains more than one volume, then information for each volume is stored in an individual row of the table.
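
For instance, a quick sketch listing the configured volumes and their disks:

``` sql
SELECT policy_name, volume_name, volume_priority, disks
FROM system.storage_policies
```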
40
docs/zh/operations/system-tables/table_engines.md
Normal file
@ -0,0 +1,40 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.table\_engines {#system-table-engines}

Contains descriptions of table engines supported by the server and their feature support information.

This table contains the following columns (the column type is shown in brackets):

- `name` (String) — The name of the table engine.
- `supports_settings` (UInt8) — Flag that indicates if the table engine supports the `SETTINGS` clause.
- `supports_skipping_indices` (UInt8) — Flag that indicates if the table engine supports [skipping indices](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
- `supports_ttl` (UInt8) — Flag that indicates if the table engine supports [TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `supports_sort_order` (UInt8) — Flag that indicates if the table engine supports the clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
- `supports_replication` (UInt8) — Flag that indicates if the table engine supports [data replication](../../engines/table-engines/mergetree-family/replication.md).
- `supports_deduplication` (UInt8) — Flag that indicates if the table engine supports data deduplication.

Example:

``` sql
SELECT *
FROM system.table_engines
WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
```

``` text
┌─name──────────────────────────┬─supports_settings─┬─supports_skipping_indices─┬─supports_sort_order─┬─supports_ttl─┬─supports_replication─┬─supports_deduplication─┐
│ Kafka                         │                 1 │                         0 │                   0 │            0 │                    0 │                      0 │
│ MergeTree                     │                 1 │                         1 │                   1 │            1 │                    0 │                      0 │
│ ReplicatedCollapsingMergeTree │                 1 │                         1 │                   1 │            1 │                    1 │                      1 │
└───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘
```

**See Also**

- MergeTree family [query clauses](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-query-clauses)
- Kafka [settings](../../engines/table-engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
- Join [settings](../../engines/table-engines/special/join.md#join-limitations-and-settings)
54
docs/zh/operations/system-tables/tables.md
Normal file
@ -0,0 +1,54 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.tables {#system-tables}

Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.

This table contains the following columns (the column type is shown in brackets):

- `database` (String) — The name of the database the table is in.

- `name` (String) — Table name.

- `engine` (String) — Table engine name (without parameters).

- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.

- `data_path` (String) - Path to the table data in the file system.

- `metadata_path` (String) - Path to the table metadata in the file system.

- `metadata_modification_time` (DateTime) - Time of the latest modification of the table metadata.

- `dependencies_database` (Array(String)) - Database dependencies.

- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](../../engines/table-engines/special/materializedview.md) tables based on the current table).

- `create_table_query` (String) - The query that was used to create the table.

- `engine_full` (String) - Parameters of the table engine.

- `partition_key` (String) - The partition key expression specified in the table.

- `sorting_key` (String) - The sorting key expression specified in the table.

- `primary_key` (String) - The primary key expression specified in the table.

- `sampling_key` (String) - The sampling key expression specified in the table.

- `storage_policy` (String) - The storage policy:

    - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)
    - [Distributed](../../engines/table-engines/special/distributed.md#distributed)

- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine the exact number of rows in the table, otherwise `Null` (including the underlying `Buffer` table).

- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine the exact number of bytes for the table in storage, otherwise `Null` (does **not** include any underlying storage).

    - If the table stores data on disk, returns the used space on disk (i.e. compressed).
    - If the table stores data in memory, returns the approximate number of used bytes in memory.

The `system.tables` table is used in the `SHOW TABLES` query implementation.
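
As a sketch, the following lists the tables of a single database together with their engines (the `system` database is used here purely as an example):

``` sql
SELECT name, engine, total_rows
FROM system.tables
WHERE database = 'system'
LIMIT 5
```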
31
docs/zh/operations/system-tables/text_log.md
Normal file
@ -0,0 +1,31 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.text\_log {#system-tables-text-log}

Contains logging entries. The logging level which goes to this table can be limited with the `text_log.level` server setting.

Columns:

- `event_date` (Date) — Date of the entry.
- `event_time` (DateTime) — Time of the entry.
- `microseconds` (UInt32) — Microseconds of the entry.
- `thread_name` (String) — Name of the thread from which the logging was done.
- `thread_id` (UInt64) — OS thread ID.
- `level` (`Enum8`) — Entry level. Possible values:
    - `1` or `'Fatal'`.
    - `2` or `'Critical'`.
    - `3` or `'Error'`.
    - `4` or `'Warning'`.
    - `5` or `'Notice'`.
    - `6` or `'Information'`.
    - `7` or `'Debug'`.
    - `8` or `'Trace'`.
- `query_id` (String) — ID of the query.
- `logger_name` (LowCardinality(String)) — Name of the logger (i.e. `DDLWorker`).
- `message` (String) — The message itself.
- `revision` (UInt32) — ClickHouse revision.
- `source_file` (LowCardinality(String)) — Source file from which the logging was done.
- `source_line` (UInt64) — Source line from which the logging was done.
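
A sketch of pulling the most recent severe entries; comparing the `level` enum against its numeric values, as enumerated above, is assumed to be acceptable here:

``` sql
SELECT event_time, level, logger_name, message
FROM system.text_log
WHERE level <= 3  -- up to and including 'Error' = 3
ORDER BY event_time DESC
LIMIT 10
```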
53
docs/zh/operations/system-tables/trace_log.md
Normal file
@ -0,0 +1,53 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.trace\_log {#system_tables-trace_log}

Contains stack traces collected by the sampling query profiler.

ClickHouse creates this table when the [trace\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also the [query\_profiler\_real\_time\_period\_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) and [query\_profiler\_cpu\_time\_period\_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.

To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions.

Columns:

- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Date of the sampling moment.

- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Timestamp of the sampling moment.

- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Timestamp of the sampling moment in nanoseconds.

- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse server build revision.

    When connecting to the server by `clickhouse-client`, you see a string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of the server.

- `timer_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Timer type:

    - `Real` represents wall-clock time.
    - `CPU` represents CPU time.

- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Thread identifier.

- `query_id` ([String](../../sql-reference/data-types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) system table.

- `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside the ClickHouse server process.

**Example**

``` sql
SELECT * FROM system.trace_log LIMIT 1 \G
```

``` text
Row 1:
──────
event_date: 2019-11-15
event_time: 2019-11-15 15:09:38
revision: 54428
timer_type: Real
thread_number: 48
query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915
trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
```
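
Building on the introspection functions named above, a common sketch (assuming the `allow_introspection_functions` setting is enabled and the binary has symbols) renders the most frequently sampled stack in human-readable form:

``` sql
SELECT
    count() AS samples,
    arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS readable_trace
FROM system.trace_log
GROUP BY trace
ORDER BY samples DESC
LIMIT 1
```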
75
docs/zh/operations/system-tables/zookeeper.md
Normal file
@ -0,0 +1,75 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.zookeeper {#system-zookeeper}

The table does not exist if ZooKeeper is not configured. It allows reading data from the ZooKeeper cluster defined in the config.
The query must have a ‘path’ equality condition in the WHERE clause. This is the path in ZooKeeper to the children that you want to get data for.

The query `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` outputs data for all children of the `/clickhouse` node.
To output data for all root nodes, write path = ‘/’.
If the path specified in ‘path’ doesn't exist, an exception will be thrown.

Columns:

- `name` (String) — The name of the node.
- `path` (String) — The path to the node.
- `value` (String) — Node value.
- `dataLength` (Int32) — Size of the value.
- `numChildren` (Int32) — Number of descendants.
- `czxid` (Int64) — ID of the transaction that created the node.
- `mzxid` (Int64) — ID of the transaction that last changed the node.
- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants.
- `ctime` (DateTime) — Time of node creation.
- `mtime` (DateTime) — Time of the last modification of the node.
- `version` (Int32) — Node version: the number of times the node was changed.
- `cversion` (Int32) — Number of added or removed descendants.
- `aversion` (Int32) — Number of changes to the ACL.
- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.

Example:

``` sql
SELECT *
FROM system.zookeeper
WHERE path = '/clickhouse/tables/01-08/visits/replicas'
FORMAT Vertical
```

``` text
Row 1:
──────
name: example01-08-1.yandex.ru
value:
czxid: 932998691229
mzxid: 932998691229
ctime: 2015-03-27 16:49:51
mtime: 2015-03-27 16:49:51
version: 0
cversion: 47
aversion: 0
ephemeralOwner: 0
dataLength: 0
numChildren: 7
pzxid: 987021031383
path: /clickhouse/tables/01-08/visits/replicas

Row 2:
──────
name: example01-08-2.yandex.ru
value:
czxid: 933002738135
mzxid: 933002738135
ctime: 2015-03-27 16:57:01
mtime: 2015-03-27 16:57:01
version: 0
cversion: 37
aversion: 0
ephemeralOwner: 0
dataLength: 0
numChildren: 7
pzxid: 987021252247
path: /clickhouse/tables/01-08/visits/replicas
```
@ -19,7 +19,7 @@ ClickHouse:

Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries\_lazy\_load](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) setting.

The [dictionaries](../../../operations/system-tables.md#system_tables-dictionaries) system table contains information about dictionaries configured at the server. For each dictionary you can find there:
The [dictionaries](../../../operations/system-tables/dictionaries.md#system_tables-dictionaries) system table contains information about dictionaries configured at the server. For each dictionary you can find there:

- Status of the dictionary.
- Configuration parameters.
@ -20,7 +20,7 @@ toc_title: "\u81EA\u7701"

For security reasons introspection functions are disabled by default.

ClickHouse saves profiler reports to the [trace\_log](../../operations/system-tables.md#system_tables-trace_log) system table. Make sure the table and profiler are configured properly.
ClickHouse saves profiler reports to the [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table. Make sure the table and profiler are configured properly.

## addressToLine {#addresstoline}
@ -1 +0,0 @@
../../../en/sql-reference/operators/in.md
204
docs/zh/sql-reference/operators/in.md
Normal file
@ -0,0 +1,204 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# IN Operators {#select-in-operators}

The `IN`, `NOT IN`, `GLOBAL IN`, and `GLOBAL NOT IN` operators are covered separately, since their functionality is quite rich.

The left side of the operator is either a single column or a tuple.

Examples:

``` sql
SELECT UserID IN (123, 456) FROM ...
SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ...
```

If the left side is a single column that is in the index, and the right side is a set of constants, the system uses the index for processing the query.

Don't list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section “External data for query processing”), then use a subquery.

The right side of the operator can be a set of constant expressions, a set of tuples with constant expressions (shown in the examples above), or the name of a database table or SELECT subquery in brackets.

If the right side of the operator is the name of a table (for example, `UserID IN users`), this is equivalent to the subquery `UserID IN (SELECT * FROM users)`. Use this when working with external data that is sent along with the query. For example, the query can be sent together with a set of user IDs loaded to the ‘users’ temporary table, which should be filtered.

If the right side of the operator is a table name that has the Set engine (a prepared data set that is always in RAM), the data set will not be created over again for each query.

The subquery may specify more than one column for filtering tuples.
Example:

``` sql
SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ...
```

The columns to the left and right of the IN operator should have the same type.

The IN operator and subquery may occur in any part of the query, including in aggregate functions and lambda functions.
Example:

``` sql
SELECT
    EventDate,
    avg(UserID IN
    (
        SELECT UserID
        FROM test.hits
        WHERE EventDate = toDate('2014-03-17')
    )) AS ratio
FROM test.hits
GROUP BY EventDate
ORDER BY EventDate ASC
```

``` text
┌──EventDate─┬────ratio─┐
│ 2014-03-17 │        1 │
│ 2014-03-18 │ 0.807696 │
│ 2014-03-19 │ 0.755406 │
│ 2014-03-20 │ 0.723218 │
│ 2014-03-21 │ 0.697021 │
│ 2014-03-22 │ 0.647851 │
│ 2014-03-23 │ 0.648416 │
└────────────┴──────────┘
```

For each day after March 17th, count the percentage of pageviews made by users who visited the site on March 17th.
A subquery in the IN clause is always run just one time on a single server. There are no dependent subqueries.

## NULL Processing {#in-null-processing}

During request processing, the `IN` operator assumes that the result of an operation with [NULL](../../sql-reference/syntax.md#null-literal) is always equal to `0`, regardless of whether `NULL` is on the right or left side of the operator. `NULL` values are not included in any dataset, do not correspond to each other and cannot be compared if [transform\_null\_in = 0](../../operations/settings/settings.md#transform_null_in).

Here is an example with the `t_null` table:

``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │    3 │
└───┴──────┘
```

Running the query `SELECT x FROM t_null WHERE y IN (NULL,3)` gives you the following result:

``` text
┌─x─┐
│ 2 │
└───┘
```

You can see that the row in which `y = NULL` is thrown out of the query results. This is because ClickHouse can't decide whether `NULL` is included in the `(NULL,3)` set, returns `0` as the result of the operation, and `SELECT` excludes this row from the final output.

``` sql
SELECT y IN (NULL, 3)
FROM t_null
```

``` text
┌─in(y, tuple(NULL, 3))─┐
│                     0 │
│                     1 │
└───────────────────────┘
```

## Distributed Subqueries {#select-distributed-subqueries}

There are two options for IN-s with subqueries (similar to JOINs): normal `IN` / `JOIN` and `GLOBAL IN` / `GLOBAL JOIN`. They differ in how they are run for distributed query processing.

!!! attention "Attention"
    Remember that the algorithms described below may work differently depending on the `distributed_product_mode` [setting](../../operations/settings/settings.md).

When using the regular IN, the query is sent to remote servers, and each of them runs the subqueries in the `IN` or `JOIN` clause.

When using `GLOBAL IN` / `GLOBAL JOINs`, first all the subqueries are run for `GLOBAL IN` / `GLOBAL JOINs`, and the results are collected in temporary tables. Then the temporary tables are sent to each remote server, where the queries are run using this temporary data.

For a non-distributed query, use the regular `IN` / `JOIN`.

Be careful when using subqueries in the `IN` / `JOIN` clauses for distributed query processing.

Let's look at some examples. Assume that each server in the cluster has a normal **local\_table**. Each server also has a **distributed\_table** table with the **Distributed** type, which looks at all the servers in the cluster.

For a query to the **distributed\_table**, the query will be sent to all the remote servers and run on them using the **local\_table**.

For example, the query

``` sql
SELECT uniq(UserID) FROM distributed_table
```

will be sent to all remote servers as

``` sql
SELECT uniq(UserID) FROM local_table
```

and run on each of them in parallel, until it reaches the stage where intermediate results can be combined. Then the intermediate results will be returned to the requestor server and merged on it, and the final result will be sent to the client.

Now let's examine a query with IN:

``` sql
SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34)
```

- Calculation of the intersection of audiences of two sites.

This query will be sent to all remote servers as

``` sql
SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34)
```

In other words, the data set in the IN clause will be collected on each server independently, only across the data that is stored locally on each of the servers.

This will work correctly and optimally if you are prepared for this case and have spread data across the cluster servers such that the data for a single UserID resides entirely on a single server. In this case, all the necessary data will be available locally on each server. Otherwise, the result will be inaccurate. We refer to this variation of the query as “local IN”.

To correct how the query works when data is spread randomly across the cluster servers, you could specify **distributed\_table** inside a subquery. The query would look like this:

``` sql
SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
```

This query will be sent to all remote servers as

``` sql
SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
```

The subquery will begin running on each remote server. Since the subquery uses a distributed table, the subquery that is on each remote server will be resent to every remote server as

``` sql
SELECT UserID FROM local_table WHERE CounterID = 34
```

For example, if you have a cluster of 100 servers, executing the entire query will require 10,000 elementary requests, which is generally considered unacceptable.

In such cases, you should always use GLOBAL IN instead of IN. Let's look at how it works for the query

``` sql
SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
```

The requestor server will run the subquery

``` sql
SELECT UserID FROM distributed_table WHERE CounterID = 34
```

and the result will be put in a temporary table in RAM. Then the request will be sent to each remote server as

``` sql
SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL IN _data1
```

and the temporary table `_data1` will be sent to every remote server together with the query (the name of the temporary table is implementation-defined).

This is more optimal than using the normal IN. However, keep the following points in mind:

1. When creating a temporary table, the data is not made unique. To reduce the volume of data transmitted over the network, specify DISTINCT in the subquery. (You don't need to do this for a normal IN.)
2. The temporary table will be sent to all the remote servers. Transmission does not account for network topology. For example, if 10 remote servers reside in a datacenter that is very remote in relation to the requestor server, the data will be sent 10 times over the channel to the remote datacenter. Try to avoid large data sets when using GLOBAL IN.
3. When transmitting data to remote servers, restrictions on network bandwidth are not configurable. You might overload the network.
4. Try to distribute data across servers so that you don't need to use GLOBAL IN on a regular basis.
5. If you need to use GLOBAL IN often, plan the location of the ClickHouse cluster so that a single group of replicas resides in no more than one data center with a fast network between them, so that a query can be processed entirely within a single data center.

It also makes sense to specify a local table in the `GLOBAL IN` clause, in case this local table is only available on the requestor server and you want to use data from it on remote servers.
@ -1,48 +1,47 @@
|
||||
---
toc_priority: 36
toc_title: ALTER
---

## ALTER {#query_language_queries_alter}

The `ALTER` query is only supported for `*MergeTree` tables, as well as `Merge` and `Distributed`. The query has several variations.

### Column Manipulations {#column-manipulations}

Changing the table structure:

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
```

In the query, specify a list of one or more comma-separated actions. Each action is an operation on a column.

The following actions are supported:

- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
- [DROP COLUMN](#alter_drop-column) — Deletes the column.
- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
- [MODIFY COLUMN](#alter_modify-column) — Changes the column's type, default expression, and TTL.

These actions are described in detail below.

#### ADD COLUMN {#alter_add-column}

``` sql
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]
```

Adds a new column to the table with the specified `name`, `type`, [`codec`](../../sql-reference/statements/create.md#codecs), and `default_expr` (see [Default expressions](../../sql-reference/statements/create.md#create-default-values)).

If the `IF NOT EXISTS` clause is included, the query won't return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. Otherwise, the column is added to the end of the table. Note that there is no way to add a column to the beginning of a table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.

Adding a column just changes the table structure, without performing any actions with data. The data doesn't appear on disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings). The column appears on disk after merging data parts (see [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md)).

This approach allows us to complete the `ALTER` query instantly, without increasing the volume of old data.

Example:
``` sql
ALTER TABLE visits ADD COLUMN browser String AFTER user_id
```

#### DROP COLUMN {#alter_drop-column}

``` sql
DROP COLUMN [IF EXISTS] name
```

Deletes the column with the name `name`. If the `IF EXISTS` clause is specified, the query won't return an error if the column doesn't exist.

Deletes data from the file system. Since this deletes entire files, the query is completed almost instantly.

Example:
``` sql
ALTER TABLE visits DROP COLUMN browser
```

#### CLEAR COLUMN {#alter_clear-column}

``` sql
CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name
```

Resets all data in a column for a specified partition. Read more about setting the partition name `partition_name` in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).

If the `IF EXISTS` clause is specified, the query won't return an error if the column doesn't exist.

Example:
``` sql
ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()
```

#### COMMENT COLUMN {#alter_comment-column}

``` sql
COMMENT COLUMN [IF EXISTS] name 'comment'
```

Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won't return an error if the column doesn't exist.

Each column can have one comment. If a comment already exists for the column, a new comment overwrites the previous one.

Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query.

Example:
``` sql
ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.'
```

#### MODIFY COLUMN {#alter_modify-column}

``` sql
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL]
```

This query changes the `name` column properties:

- Type
- Default expression
- TTL

For examples of column TTL modifying, see [Column TTL](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).

If the `IF EXISTS` clause is specified, the query won't return an error if the column doesn't exist.

When changing the type, values are converted as if the [toType](../../sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query doesn't do anything complex, and is completed almost instantly.

Example:
``` sql
ALTER TABLE visits MODIFY COLUMN browser Array(String)
```

Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time.

There are several processing stages:

- Preparing temporary (new) files with modified data.
- Renaming old files.
- Renaming the temporary (new) files to the old names.
- Deleting the old files.

Only the first stage takes time. If there is a failure at this stage, the data is not changed. If there is a failure during one of the successive stages, data can be restored manually. The exception is if the old files were deleted from the file system but the data for the new files was not written to disk and was lost.

The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously.

#### ALTER Query Limitations {#alter-query-limitations}

The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.

There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`).

If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) query, then switch the tables using the [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename) query and delete the old table. You can use [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query.

The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.

For tables that don't store data themselves (such as `Merge` and `Distributed`), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you also need to run `ALTER` for the tables on all remote servers.

### Manipulations with Key Expressions {#manipulations-with-key-expressions}

The following command is supported:

``` sql
MODIFY ORDER BY new_expression
```

It only works for tables in the [`MergeTree`](../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../engines/table-engines/mergetree-family/replication.md) tables). The command changes the [sorting key](../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions). The primary key remains the same.

The command is lightweight in the sense that it only changes metadata. To keep the property that data part rows are ordered by the sorting key expression, you cannot add expressions containing existing columns to the sorting key (only columns added by the `ADD COLUMN` command in the same `ALTER` query).
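
For example, a minimal sketch under the rule just stated, assuming a hypothetical `visits` table currently sorted by `(CounterID, EventDate)`; only the `browser` column added in the same query may be appended to the sorting key:

``` sql
ALTER TABLE visits
    ADD COLUMN browser String,
    MODIFY ORDER BY (CounterID, EventDate, browser)
```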

### Manipulations with Data Skipping Indices {#manipulations-with-data-skipping-indices}

It only works for tables in the [`*MergeTree`](../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../engines/table-engines/mergetree-family/replication.md) tables). The following operations are available:

- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` — Adds the index description to the table metadata.

- `ALTER TABLE [db].name DROP INDEX name` — Removes the index description from the table metadata and deletes the index files from disk.

These commands are lightweight in the sense that they only change metadata or remove files. They are also replicated (index metadata is synchronized through ZooKeeper).
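
For example, a sketch using an assumed `visits` table and a hypothetical index name `browser_idx`:

``` sql
ALTER TABLE visits ADD INDEX browser_idx browser TYPE set(100) GRANULARITY 4;
ALTER TABLE visits DROP INDEX browser_idx;
```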

### Manipulations with Constraints {#manipulations-with-constraints}

See [constraints](../../sql-reference/statements/create.md#constraints) for more information.

Constraints can be added or deleted using the following syntax:

``` sql
ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
```

These queries add or remove metadata about constraints from the table, so they are processed immediately.

The constraint check *will not be executed* on existing data if it was added.

All changes on replicated tables are broadcast to ZooKeeper, so they will be applied on the other replicas.
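
For example, a sketch with a hypothetical constraint name on the `visits` table:

``` sql
ALTER TABLE visits ADD CONSTRAINT non_empty_browser CHECK browser != '';
ALTER TABLE visits DROP CONSTRAINT non_empty_browser;
```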

### Manipulations with Partitions and Parts {#alter_manipulations-with-partitions}

The following operations with [partitions](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) are available:

- [DETACH PARTITION](#alter_detach-partition) — Moves a partition to the `detached` directory and forgets it.
- [DROP PARTITION](#alter_drop-partition) — Deletes a partition.
- [ATTACH PART\|PARTITION](#alter_attach-partition) — Adds a part or partition from the `detached` directory to the table.
- [ATTACH PARTITION FROM](#alter_attach-partition-from) — Copies the data partition from one table to another and adds it.
- [REPLACE PARTITION](#alter_replace-partition) — Copies the data partition from one table to another and replaces it.
- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) — Moves the data partition from one table to another.
- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) — Resets the value of a specified column in a partition.
- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — Resets the specified secondary index in a partition.
- [FREEZE PARTITION](#alter_freeze-partition) — Creates a backup of a partition.
- [FETCH PARTITION](#alter_fetch-partition) — Downloads a partition from another server.
- [MOVE PARTITION\|PART](#alter_move-partition) — Moves a partition/data part to another disk or volume.

<!-- -->

#### DETACH PARTITION {#alter_detach-partition}

``` sql
ALTER TABLE table_name DETACH PARTITION partition_expr
```

Moves all data for the specified partition to the `detached` directory. The server forgets about the detached data partition as if it does not exist. The server will not know about this data until you make the [ATTACH](#alter_attach-partition) query.

Example:

``` sql
ALTER TABLE visits DETACH PARTITION 201901
```

Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).

After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it.

This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform a `SELECT` query to the [system.replicas](../../operations/system-tables/replicas.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas – all the replicas throw an exception, except the leader replica.

#### DROP PARTITION {#alter_drop-partition}

``` sql
ALTER TABLE table_name DROP PARTITION partition_expr
```

Deletes the specified partition from the table. This query tags the partition as inactive and deletes the data completely, approximately in 10 minutes.

Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).

The query is replicated – it deletes data on all replicas.

#### DROP DETACHED PARTITION\|PART {#alter_drop-detached}

``` sql
ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr
```

Removes the specified part, or all parts of the specified partition, from `detached`. Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).

#### ATTACH PARTITION\|PART {#alter_attach-partition}

``` sql
ALTER TABLE table_name ATTACH PARTITION|PART partition_expr
```

Adds data to the table from the `detached` directory. It is possible to add data for an entire partition or for a separate part. Examples:

``` sql
ALTER TABLE visits ATTACH PARTITION 201901;
ALTER TABLE visits ATTACH PART 201901_2_2_0;
```

Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).

This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table. All other replicas download the data from the replica-initiator.

So you can put data in the `detached` directory on one replica, and use the `ALTER ... ATTACH` query to add it to the table on all replicas.

#### ATTACH PARTITION FROM {#alter_attach-partition-from}

``` sql
ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
```

This query copies the data partition from `table1` to `table2` and adds it to the existing data in `table2`. Note that the data in `table1` is not deleted.

For the query to run successfully, the following conditions must be met:

- Both tables must have the same structure.
- Both tables must have the same partition key.

#### REPLACE PARTITION {#alter_replace-partition}

``` sql
ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1
```

This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`. Note that the data in `table1` is not deleted.

For the query to run successfully, the following conditions must be met:

- Both tables must have the same structure.
- Both tables must have the same partition key.

#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition}

``` sql
ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest
```

This query moves the data partition from `table_source` to `table_dest` and deletes the data from `table_source`.

For the query to run successfully, the following conditions must be met:

- Both tables must have the same structure.
- Both tables must have the same partition key.
- Both tables must be the same engine family (replicated or non-replicated).
- Both tables must have the same storage policy.
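
For example, a sketch assuming a hypothetical `visits_archive` table created with the same structure, partition key, engine family, and storage policy as `visits`:

``` sql
ALTER TABLE visits MOVE PARTITION 201901 TO TABLE visits_archive
```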

#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition}

``` sql
ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr
```

Resets all values in the specified column in a partition. If the `DEFAULT` clause was determined when creating the table, this query sets the column value to the specified default value.

Example:
``` sql
ALTER TABLE visits CLEAR COLUMN hour IN PARTITION 201902
```

#### FREEZE PARTITION {#alter_freeze-partition}

``` sql
ALTER TABLE table_name FREEZE [PARTITION partition_expr]
```

This query creates a local backup of a specified partition. If the `PARTITION` clause is omitted, the query creates a backup of all partitions at once.

!!! note "Note"
    The entire backup process is performed without stopping the server.

Note that for old-style tables you can specify the prefix of the partition name (for example, ‘2019’) – then the query creates a backup for all the corresponding partitions. Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).

At the time of execution, for a data snapshot, the query creates hard links to table data. Hard links are placed in the directory `/var/lib/clickhouse/shadow/N/...`, where:

- `/var/lib/clickhouse/` is the working ClickHouse directory specified in the server config.
- `N` is the incremental number of the backup.

!!! note "Note"
    If you use [a set of disks for data storage in a table](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing the data parts that matched by the `PARTITION` expression.

The same directory structure is created inside the backup as inside `/var/lib/clickhouse/`. The query performs ‘chmod’ on all files, forbidding writing to them.

After creating the backup, you can copy the data from `/var/lib/clickhouse/shadow/` to a remote server and then delete it from the local server. Note that the `ALTER t FREEZE PARTITION` query is not replicated. It creates a local backup only on the local server.

The query creates the backup almost instantly (but first it waits for the current queries to the corresponding table to finish running).

`ALTER TABLE t FREEZE PARTITION` copies only the data, not the table metadata. To make a backup of the table metadata, copy the file `/var/lib/clickhouse/metadata/database/table.sql`

To restore data from a backup, do the following:

1. Create the table if it does not exist. To view the query, use the .sql file (replace `ATTACH` in it with `CREATE`).
2. Copy the data from the `data/database/table/` directory inside the backup to the `/var/lib/clickhouse/data/database/table/detached/` directory.
3. Run `ALTER TABLE t ATTACH PARTITION` queries to add the data to the table.

Restoring from a backup does not require stopping the server.

For more information about backups and restoring data, see the [Data Backup](../../operations/backup.md) section.

#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition}

``` sql
ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr
```

The query works similarly to `CLEAR COLUMN`, but it resets an index instead of column data.

#### FETCH PARTITION {#alter_fetch-partition}

``` sql
ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
```

Downloads a partition from another server. This query only works for replicated tables.

The query does the following:

1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify the path to the shard in ZooKeeper.
2. Then the query puts the downloaded data in the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.

Example:

``` sql
ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
ALTER TABLE users ATTACH PARTITION 201902;
```

Note that:

- The `ALTER ... FETCH PARTITION` query is not replicated. It places the partition in the `detached` directory only on the local server.
- The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others from the neighboring replicas.

Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas.

Although the query is called `ALTER TABLE`, it does not change the table structure and does not immediately change the data available in the table.

#### MOVE PARTITION\|PART {#alter_move-partition}

Moves partitions or data parts to another volume or disk for `MergeTree`-engine tables. See [Using Multiple Block Devices for Data Storage](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes).

``` sql
ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'
```

The `ALTER TABLE t MOVE` query:

- Is not replicated, because different replicas can have different storage policies.
- Returns an error if the specified disk or volume is not configured. The query also returns an error if conditions of data moving specified in the storage policy can't be applied.
- Can return an error in the case when the data to be moved is already being moved by a background process, by a concurrent `ALTER TABLE t MOVE` query, or as a result of background data merging. A user shouldn't perform any additional actions in this case.

Example:
``` sql
ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd'
```

#### How to Specify the Partition Expression {#alter-how-to-specify-part-expr}

You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:

- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
- As an expression from the table columns. Constants and constant expressions are supported. For example, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`.
- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of the partition in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached\_parts](../../operations/system-tables/detached_parts.md#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.

Usage of quotes when specifying the partition depends on the type of the partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types, no quotes are needed.

For old-style tables, you can specify the partition either as a number `201901` or as a string `'201901'`. The syntax for new-style tables is stricter with types (similar to the parser for the VALUES input format).

All the rules above are also true for the [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example:

``` sql
OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
```

The examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).

### Manipulations with Table TTL {#manipulations-with-table-ttl}

You can change the [table TTL](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) with a request of the following form:

``` sql
ALTER TABLE table-name MODIFY TTL ttl-expression
```
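
For example, a minimal sketch assuming a hypothetical `visits` table with an `EventDate` column, expiring rows 30 days after the event:

``` sql
ALTER TABLE visits MODIFY TTL EventDate + INTERVAL 30 DAY
```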

### Synchronicity of ALTER Queries {#synchronicity-of-alter-queries}

For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to ZooKeeper, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.

For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting. Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all.
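
For example, a sketch that makes a detach wait for all replicas before returning (a session-level setting; the table name is hypothetical):

``` sql
SET replication_alter_partitions_sync = 2;
ALTER TABLE visits DETACH PARTITION 201901;
```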

### Mutations {#alter-mutations}

Mutations are an ALTER query variant that allows changing or deleting rows in a table. In contrast to standard `UPDATE` and `DELETE` queries that are intended for point data changes, mutations are intended for heavy operations that change a lot of rows in a table. Supported for the `MergeTree` family of table engines, including engines with replication support.

Existing tables are ready for mutations as-is (no conversion necessary), but after the first mutation is applied to a table, its metadata format becomes incompatible with previous server versions, and falling back to a previous version becomes impossible.

Currently available commands:

``` sql
ALTER TABLE [db.]table DELETE WHERE filter_expr
```

The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value.

``` sql
ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr
```

The `filter_expr` must be of type `UInt8`. This query updates values of the specified columns to the values of the corresponding expressions in rows for which `filter_expr` takes a non-zero value. Values are cast to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary key or the partition key is not supported.
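
For example, a sketch against a hypothetical `visits` table (the column names below are assumptions for illustration):

``` sql
ALTER TABLE visits DELETE WHERE EventDate < '2019-01-01';
ALTER TABLE visits UPDATE browser = 'unknown' WHERE browser = '';
```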
``` sql
ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
```

The query rebuilds the secondary index `name` in the partition `partition_name`.

One query can contain several commands separated by commas.

For \*MergeTree tables, mutations are executed by rewriting whole data parts. There is no atomicity – parts are substituted for mutated parts as soon as they are ready, and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated, along with data from parts that have not been mutated yet.

Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with inserts – data that was inserted into the table before the mutation was submitted will be mutated, and data that was inserted after that will not be mutated. Note that mutations do not block inserts in any way.

A mutation query returns immediately after the mutation entry is added (for replicated tables, to ZooKeeper; for non-replicated tables, to the file system). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations, you can use the [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](../../sql-reference/statements/misc.md#kill-mutation) query.

Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.

## ALTER USER {#alter-user-statement}

Changes ClickHouse user accounts.

### Syntax {#alter-user-syntax}
``` sql
ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]
    ...
    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
```

### Description {#alter-user-dscr}

To use `ALTER USER` you must have the [ALTER USER](../../sql-reference/statements/grant.md#grant-access-management) privilege.

### Examples {#alter-user-examples}

Set assigned roles as default:

``` sql
ALTER USER user DEFAULT ROLE role1, role2
```

If roles aren't previously assigned to the user, ClickHouse throws an exception.

Set all the assigned roles as default:

``` sql
ALTER USER user DEFAULT ROLE ALL
```

If a role is assigned to the user in the future, it will become default automatically.

Set all the assigned roles as default, except `role1` and `role2`:

``` sql
ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2
```

## ALTER ROLE {#alter-role-statement}

Changes roles.

### Syntax {#alter-role-syntax}
``` sql
ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]
    ...
    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
```

## ALTER ROW POLICY {#alter-row-policy-statement}

Changes row policies.

### Syntax {#alter-row-policy-syntax}
``` sql
ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table
    ...
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

## ALTER QUOTA {#alter-quota-statement}

Changes quotas.

### Syntax {#alter-quota-syntax}
``` sql
ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name]
    ...
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

## ALTER SETTINGS PROFILE {#alter-settings-profile-statement}

Changes settings profiles.

### Syntax {#alter-settings-profile-syntax}
``` sql
ALTER SETTINGS PROFILE [IF EXISTS] name [ON CLUSTER cluster_name]
    ...
    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
```

[Original article](https://clickhouse.tech/docs/en/query_language/alter/) <!--hide-->
---
toc_priority: 39
toc_title: GRANT
---

# GRANT {#grant}

- Grants [privileges](#grant-privileges) to ClickHouse user accounts or roles.
- Assigns roles to user accounts or to other roles.

To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. Also you can list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants-statement) statement.

## Granting Privilege Syntax {#grant-privigele-syntax}

``` sql
GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION]
```

- `privilege` — Type of privilege.
- `role` — ClickHouse user role.
- `user` — ClickHouse user account.

The `WITH GRANT OPTION` clause grants `user` or `role` the permission to execute the `GRANT` query. Users can grant privileges of the same scope they have or less.

## Assigning Role Syntax {#assign-role-syntax}

``` sql
GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
```

- `role` — ClickHouse user role.
- `user` — ClickHouse user account.

The `WITH ADMIN OPTION` clause grants the [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`.
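
For example, a sketch assuming a hypothetical role `accountant` and user `mira`; with the admin option, `mira` can in turn grant `accountant` to other users:

``` sql
GRANT accountant TO mira WITH ADMIN OPTION
```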

## Usage {#grant-usage}

To use `GRANT`, your account must have the `GRANT OPTION` privilege. You can grant privileges only within the scope of your account privileges.

For example, an administrator has granted privileges to the `john` account by the query:

``` sql
GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
```

It means that `john` has the permission to execute:

- `SELECT x,y FROM db.table`.
- `SELECT x FROM db.table`.
- `SELECT y FROM db.table`.

`john` can't execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` is also not available. Processing this query, ClickHouse doesn't return any data, even `x` and `y`. The only exception is if the table contains only the `x` and `y` columns. In this case ClickHouse returns all the data.

Also `john` has the `GRANT OPTION` privilege, so it can grant other users privileges of the same or a smaller scope.

You can use the `*` character instead of a table or a database name. For example, the `GRANT SELECT ON db.* TO john` query allows `john` to execute the `SELECT` query over all the tables in the `db` database. Also, you can omit the database name. In this case privileges are granted for the current database. For example, `GRANT SELECT ON * TO john` grants the privilege on all the tables in the current database, and `GRANT SELECT ON mytable TO john` grants the privilege on the `mytable` table in the current database.

Access to the `system` database is always allowed (since this database is used for processing queries).

You can grant multiple privileges to multiple accounts in one query. The query `GRANT SELECT, INSERT ON *.* TO john, robin` allows the `john` and `robin` accounts to execute the `INSERT` and `SELECT` queries over all the tables in all the databases on the server.

## Privileges {#grant-privileges}

A privilege is a permission to execute a specific kind of query.

Privileges have a hierarchical structure. A set of permitted queries depends on the privilege scope.

Hierarchy of privileges:
- [SELECT](#grant-select)
- [INSERT](#grant-insert)
- [ALTER](#grant-alter)
    - `ALTER TABLE`
    - `ALTER UPDATE`
    - `ALTER DELETE`
    - `ALTER COLUMN`
    - `ALTER ADD COLUMN`
    - `ALTER DROP COLUMN`
    - `ALTER MODIFY COLUMN`
    - `ALTER COMMENT COLUMN`
    - `ALTER CLEAR COLUMN`
    - `ALTER RENAME COLUMN`
    - `ALTER INDEX`
    - `ALTER ORDER BY`
    - `ALTER ADD INDEX`
    - `ALTER DROP INDEX`
    - `ALTER MATERIALIZE INDEX`
    - `ALTER CLEAR INDEX`
    - `ALTER CONSTRAINT`
    - `ALTER ADD CONSTRAINT`
    - `ALTER DROP CONSTRAINT`
    - `ALTER TTL`
    - `ALTER MATERIALIZE TTL`
    - `ALTER SETTINGS`
    - `ALTER MOVE PARTITION`
    - `ALTER FETCH PARTITION`
    - `ALTER FREEZE PARTITION`
    - `ALTER VIEW`
    - `ALTER VIEW REFRESH`
    - `ALTER VIEW MODIFY QUERY`
- [CREATE](#grant-create)
    - `CREATE DATABASE`
    - `CREATE TABLE`
    - `CREATE VIEW`
    - `CREATE DICTIONARY`
    - `CREATE TEMPORARY TABLE`
- [DROP](#grant-drop)
    - `DROP DATABASE`
    - `DROP TABLE`
    - `DROP VIEW`
    - `DROP DICTIONARY`
- [TRUNCATE](#grant-truncate)
- [OPTIMIZE](#grant-optimize)
- [SHOW](#grant-show)
    - `SHOW DATABASES`
    - `SHOW TABLES`
    - `SHOW COLUMNS`
    - `SHOW DICTIONARIES`
- [KILL QUERY](#grant-kill-query)
- [ACCESS MANAGEMENT](#grant-access-management)
    - `CREATE USER`
    - `ALTER USER`
    - `DROP USER`
    - `CREATE ROLE`
    - `ALTER ROLE`
    - `DROP ROLE`
    - `CREATE ROW POLICY`
    - `ALTER ROW POLICY`
    - `DROP ROW POLICY`
    - `CREATE QUOTA`
    - `ALTER QUOTA`
    - `DROP QUOTA`
    - `CREATE SETTINGS PROFILE`
    - `ALTER SETTINGS PROFILE`
    - `DROP SETTINGS PROFILE`
    - `SHOW ACCESS`
    - `SHOW_USERS`
    - `SHOW_ROLES`
    - `SHOW_ROW_POLICIES`
    - `SHOW_QUOTAS`
    - `SHOW_SETTINGS_PROFILES`
    - `ROLE ADMIN`
- [SYSTEM](#grant-system)
    - `SYSTEM SHUTDOWN`
    - `SYSTEM DROP CACHE`
    - `SYSTEM DROP DNS CACHE`
    - `SYSTEM DROP MARK CACHE`
    - `SYSTEM DROP UNCOMPRESSED CACHE`
    - `SYSTEM RELOAD`
    - `SYSTEM RELOAD CONFIG`
    - `SYSTEM RELOAD DICTIONARY`
    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`
    - `SYSTEM MERGES`
    - `SYSTEM TTL MERGES`
    - `SYSTEM FETCHES`
    - `SYSTEM MOVES`
    - `SYSTEM SENDS`
    - `SYSTEM DISTRIBUTED SENDS`
    - `SYSTEM REPLICATED SENDS`
    - `SYSTEM REPLICATION QUEUES`
    - `SYSTEM SYNC REPLICA`
    - `SYSTEM RESTART REPLICA`
    - `SYSTEM FLUSH`
    - `SYSTEM FLUSH DISTRIBUTED`
    - `SYSTEM FLUSH LOGS`
- [INTROSPECTION](#grant-introspection)
    - `addressToLine`
    - `addressToSymbol`
    - `demangle`
- [SOURCES](#grant-sources)
    - `FILE`
    - `URL`
    - `REMOTE`
    - `MYSQL`
    - `ODBC`
    - `JDBC`
    - `HDFS`
    - `S3`
- [dictGet](#grant-dictget)

Examples of how this hierarchy is treated:

- The `ALTER` privilege includes all other `ALTER *` privileges.
- `ALTER CONSTRAINT` includes the `ALTER ADD CONSTRAINT` and `ALTER DROP CONSTRAINT` privileges.

Privileges are applied at different levels. Knowing the level suggests the syntax available for the privilege.

Levels (from lower to higher):

- `COLUMN` — Privilege can be granted for a column, table, database, or globally.
- `TABLE` — Privilege can be granted for a table, database, or globally.
- `VIEW` — Privilege can be granted for a view, database, or globally.
- `DICTIONARY` — Privilege can be granted for a dictionary, database, or globally.
- `DATABASE` — Privilege can be granted for a database or globally.
- `GLOBAL` — Privilege can be granted only globally.
- `GROUP` — Groups privileges of different levels. When a `GROUP`-level privilege is granted, only those privileges from the group are granted that correspond to the used syntax.

Examples of allowed syntax:

- `GRANT SELECT(x) ON db.table TO user`
- `GRANT SELECT ON db.* TO user`

Examples of disallowed syntax:

- `GRANT CREATE USER(x) ON db.table TO user`
- `GRANT CREATE USER ON db.* TO user`

The special privilege `ALL` grants all the privileges to a user account or a role.

By default, a user account or a role has no privileges.

If a user or a role has no privileges, it is displayed as the `NONE` privilege.

Some queries by their implementation require a set of privileges. For example, to execute the [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT`, and `DROP TABLE`.

### SELECT {#grant-select}

Allows executing [SELECT](../../sql-reference/statements/select/index.md) queries.

Privilege level: `COLUMN`.

**Description**

A user granted this privilege can execute `SELECT` queries over a specified list of columns in the specified table and database. If the user includes other columns, the query returns no data.

Consider the following privilege:

``` sql
GRANT SELECT(x,y) ON db.table TO john
```

This privilege allows `john` to execute any `SELECT` query that involves data from the `x` and/or `y` columns in `db.table`, for example, `SELECT x FROM db.table`. `john` can't execute `SELECT z FROM db.table` or `SELECT * FROM db.table`. Processing these queries, ClickHouse doesn't return any data, even `x` and `y`. The only exception is if the table contains only the `x` and `y` columns, in which case ClickHouse returns all the data.

### INSERT {#grant-insert}

Allows executing [INSERT](../../sql-reference/statements/insert-into.md) queries.

Privilege level: `COLUMN`.

**Description**

A user granted this privilege can execute `INSERT` queries over a specified list of columns in the specified table and database. If the user includes other columns, the query doesn't insert any data.

**Example**

``` sql
GRANT INSERT(x,y) ON db.table TO john
```

The granted privilege allows `john` to insert data into the `x` and/or `y` columns in `db.table`.

### ALTER {#grant-alter}

Allows executing [ALTER](../../sql-reference/statements/alter.md) queries according to the following hierarchy of privileges:

- `ALTER`. Level: `COLUMN`.
- `ALTER TABLE`. Level: `GROUP`
- `ALTER UPDATE`. Level: `COLUMN`. Aliases: `UPDATE`
- `ALTER DELETE`. Level: `COLUMN`. Aliases: `DELETE`
- `ALTER COLUMN`. Level: `GROUP`
- `ALTER ADD COLUMN`. Level: `COLUMN`. Aliases: `ADD COLUMN`
- `ALTER DROP COLUMN`. Level: `COLUMN`. Aliases: `DROP COLUMN`
- `ALTER MODIFY COLUMN`. Level: `COLUMN`. Aliases: `MODIFY COLUMN`
- `ALTER COMMENT COLUMN`. Level: `COLUMN`. Aliases: `COMMENT COLUMN`
- `ALTER CLEAR COLUMN`. Level: `COLUMN`. Aliases: `CLEAR COLUMN`
- `ALTER RENAME COLUMN`. Level: `COLUMN`. Aliases: `RENAME COLUMN`
- `ALTER INDEX`. Level: `GROUP`. Aliases: `INDEX`
- `ALTER ORDER BY`. Level: `TABLE`. Aliases: `ALTER MODIFY ORDER BY`, `MODIFY ORDER BY`
- `ALTER ADD INDEX`. Level: `TABLE`. Aliases: `ADD INDEX`
- `ALTER DROP INDEX`. Level: `TABLE`. Aliases: `DROP INDEX`
- `ALTER MATERIALIZE INDEX`. Level: `TABLE`. Aliases: `MATERIALIZE INDEX`
- `ALTER CLEAR INDEX`. Level: `TABLE`. Aliases: `CLEAR INDEX`
- `ALTER CONSTRAINT`. Level: `GROUP`. Aliases: `CONSTRAINT`
- `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
- `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
- `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
- `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
- `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
- `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
- `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`
- `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION`
- `ALTER VIEW`. Level: `GROUP`
- `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
- `ALTER VIEW MODIFY QUERY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY QUERY`

Examples of how this hierarchy is treated:

- The `ALTER` privilege includes all other `ALTER *` privileges.
- `ALTER CONSTRAINT` includes the `ALTER ADD CONSTRAINT` and `ALTER DROP CONSTRAINT` privileges.

**Notes**

- The `MODIFY SETTING` privilege allows modifying table engine settings. It doesn't affect settings or server configuration parameters.
- The `ATTACH` operation needs the [CREATE](#grant-create) privilege.
- The `DETACH` operation needs the [DROP](#grant-drop) privilege.
- To stop a mutation by the [KILL MUTATION](../../sql-reference/statements/misc.md#kill-mutation) query, you need to have the privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.

### CREATE {#grant-create}

Allows executing [CREATE](../../sql-reference/statements/create.md) and [ATTACH](../../sql-reference/statements/misc.md#attach) DDL queries according to the following hierarchy of privileges:

- `CREATE`. Level: `GROUP`
- `CREATE DATABASE`. Level: `DATABASE`
- `CREATE TABLE`. Level: `TABLE`
- `CREATE VIEW`. Level: `VIEW`
- `CREATE DICTIONARY`. Level: `DICTIONARY`
- `CREATE TEMPORARY TABLE`. Level: `GLOBAL`

**Notes**

- To delete the created table, a user needs the [DROP](#grant-drop) privilege.

### DROP {#grant-drop}

Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH](../../sql-reference/statements/misc.md#detach) queries according to the following hierarchy of privileges:

- `DROP`. Level: `GROUP`
- `DROP DATABASE`. Level: `DATABASE`
- `DROP TABLE`. Level: `TABLE`
- `DROP VIEW`. Level: `VIEW`
- `DROP DICTIONARY`. Level: `DICTIONARY`

### TRUNCATE {#grant-truncate}

Allows executing [TRUNCATE](../../sql-reference/statements/misc.md#truncate-statement) queries.

Privilege level: `TABLE`.

### OPTIMIZE {#grant-optimize}

Allows executing [OPTIMIZE TABLE](../../sql-reference/statements/misc.md#misc_operations-optimize) queries.

Privilege level: `TABLE`.

### SHOW {#grant-show}

Allows executing `SHOW`, `DESCRIBE`, `USE`, and `EXISTS` queries according to the following hierarchy of privileges:

- `SHOW`. Level: `GROUP`
- `SHOW DATABASES`. Level: `DATABASE`. Allows executing `SHOW DATABASES`, `SHOW CREATE DATABASE`, `USE <database>` queries.
- `SHOW TABLES`. Level: `TABLE`. Allows executing `SHOW TABLES`, `EXISTS <table>`, `CHECK <table>` queries.
- `SHOW COLUMNS`. Level: `COLUMN`. Allows executing `SHOW CREATE TABLE`, `DESCRIBE` queries.
- `SHOW DICTIONARIES`. Level: `DICTIONARY`. Allows executing `SHOW DICTIONARIES`, `SHOW CREATE DICTIONARY`, `EXISTS <dictionary>` queries.

**Notes**

A user has the `SHOW` privilege if they have any other privilege concerning the specified table, dictionary, or database.

### KILL QUERY {#grant-kill-query}

Allows executing [KILL](../../sql-reference/statements/misc.md#kill-query-statement) queries.

Privilege level: `GLOBAL`.

**Notes**

The `KILL QUERY` privilege allows one user to kill queries of other users.

### ACCESS MANAGEMENT {#grant-access-management}

Allows a user to execute queries that manage users, roles, and row policies:

- `ACCESS MANAGEMENT`. Level: `GROUP`
- `CREATE USER`. Level: `GLOBAL`
- `ALTER USER`. Level: `GLOBAL`
- `DROP USER`. Level: `GLOBAL`
- `CREATE ROLE`. Level: `GLOBAL`
- `ALTER ROLE`. Level: `GLOBAL`
- `DROP ROLE`. Level: `GLOBAL`
- `ROLE ADMIN`. Level: `GLOBAL`
- `CREATE ROW POLICY`. Level: `GLOBAL`. Aliases: `CREATE POLICY`
- `ALTER ROW POLICY`. Level: `GLOBAL`. Aliases: `ALTER POLICY`
- `DROP ROW POLICY`. Level: `GLOBAL`. Aliases: `DROP POLICY`
- `CREATE QUOTA`. Level: `GLOBAL`
- `ALTER QUOTA`. Level: `GLOBAL`
- `DROP QUOTA`. Level: `GLOBAL`
- `CREATE SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `CREATE PROFILE`
- `ALTER SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `ALTER PROFILE`
- `DROP SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `DROP PROFILE`
- `SHOW ACCESS`. Level: `GROUP`
- `SHOW_USERS`. Level: `GLOBAL`. Aliases: `SHOW CREATE USER`
- `SHOW_ROLES`. Level: `GLOBAL`. Aliases: `SHOW CREATE ROLE`
- `SHOW_ROW_POLICIES`. Level: `GLOBAL`. Aliases: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY`
- `SHOW_QUOTAS`. Level: `GLOBAL`. Aliases: `SHOW CREATE QUOTA`
- `SHOW_SETTINGS_PROFILES`. Level: `GLOBAL`. Aliases: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE`

The `ROLE ADMIN` privilege allows a user to assign and revoke any roles, including those that are not assigned to the user with the admin option.

### SYSTEM {#grant-system}

Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) queries according to the following hierarchy of privileges:

- `SYSTEM`. Level: `GROUP`
- `SYSTEM SHUTDOWN`. Level: `GLOBAL`. Aliases: `SYSTEM KILL`, `SHUTDOWN`
- `SYSTEM DROP CACHE`. Aliases: `DROP CACHE`
- `SYSTEM DROP DNS CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP DNS`, `DROP DNS CACHE`, `DROP DNS`
- `SYSTEM DROP MARK CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP MARK`, `DROP MARK CACHE`, `DROP MARKS`
- `SYSTEM DROP UNCOMPRESSED CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP UNCOMPRESSED`, `DROP UNCOMPRESSED CACHE`, `DROP UNCOMPRESSED`
- `SYSTEM RELOAD`. Level: `GROUP`
- `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG`
- `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
- `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: `RELOAD EMBEDDED DICTIONARIES`
- `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
- `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
- `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
- `SYSTEM MOVES`. Level: `TABLE`. Aliases: `SYSTEM STOP MOVES`, `SYSTEM START MOVES`, `STOP MOVES`, `START MOVES`
- `SYSTEM SENDS`. Level: `GROUP`. Aliases: `SYSTEM STOP SENDS`, `SYSTEM START SENDS`, `STOP SENDS`, `START SENDS`
- `SYSTEM DISTRIBUTED SENDS`. Level: `TABLE`. Aliases: `SYSTEM STOP DISTRIBUTED SENDS`, `SYSTEM START DISTRIBUTED SENDS`, `STOP DISTRIBUTED SENDS`, `START DISTRIBUTED SENDS`
- `SYSTEM REPLICATED SENDS`. Level: `TABLE`. Aliases: `SYSTEM STOP REPLICATED SENDS`, `SYSTEM START REPLICATED SENDS`, `STOP REPLICATED SENDS`, `START REPLICATED SENDS`
- `SYSTEM REPLICATION QUEUES`. Level: `TABLE`. Aliases: `SYSTEM STOP REPLICATION QUEUES`, `SYSTEM START REPLICATION QUEUES`, `STOP REPLICATION QUEUES`, `START REPLICATION QUEUES`
- `SYSTEM SYNC REPLICA`. Level: `TABLE`. Aliases: `SYNC REPLICA`
- `SYSTEM RESTART REPLICA`. Level: `TABLE`. Aliases: `RESTART REPLICA`
- `SYSTEM FLUSH`. Level: `GROUP`
- `SYSTEM FLUSH DISTRIBUTED`. Level: `TABLE`. Aliases: `FLUSH DISTRIBUTED`
- `SYSTEM FLUSH LOGS`. Level: `GLOBAL`. Aliases: `FLUSH LOGS`

The `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege is implicitly granted by the `SYSTEM RELOAD DICTIONARY ON *.*` privilege.

### INTROSPECTION {#grant-introspection}

Allows using [introspection](../../operations/optimizing-performance/sampling-query-profiler.md) functions.

- `INTROSPECTION`. Level: `GROUP`. Aliases: `INTROSPECTION FUNCTIONS`
- `addressToLine`. Level: `GLOBAL`
- `addressToSymbol`. Level: `GLOBAL`
- `demangle`. Level: `GLOBAL`

### SOURCES {#grant-sources}

Allows using external data sources in [table engines](../../engines/table-engines/index.md) and [table functions](../../sql-reference/table-functions/index.md#table-functions).

- `SOURCES`. Level: `GROUP`
- `FILE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`

The `SOURCES` privilege enables use of all the sources. You can also grant a privilege for each source individually. To use sources, you need additional privileges.

Examples:

- To create a table with the [MySQL table engine](../../engines/table-engines/integrations/mysql.md), you need the `CREATE TABLE (ON db.table_name)` and `MYSQL` privileges.
- To use the [mysql table function](../../sql-reference/table-functions/mysql.md), you need the `CREATE TEMPORARY TABLE` and `MYSQL` privileges.

### dictGet {#grant-dictget}

- `dictGet`. Aliases: `dictHas`, `dictGetHierarchy`, `dictIsIn`

Allows a user to execute the [dictGet](../../sql-reference/functions/ext-dict-functions.md#dictget), [dictHas](../../sql-reference/functions/ext-dict-functions.md#dicthas), [dictGetHierarchy](../../sql-reference/functions/ext-dict-functions.md#dictgethierarchy), and [dictIsIn](../../sql-reference/functions/ext-dict-functions.md#dictisin) functions.

Privilege level: `DICTIONARY`.

**Examples**

- `GRANT dictGet ON mydb.mydictionary TO john`
- `GRANT dictGet ON mydictionary TO john`

### ALL {#grant-all}

Grants all the privileges on a regulated entity (column, table, database, etc.) to a user account or a role.

### NONE {#grant-none}

Doesn't grant any privileges.

### ADMIN OPTION {#admin-option-privilege}

The `ADMIN OPTION` privilege allows a user to grant their role to another user.

[Original article](https://clickhouse.tech/docs/en/query_language/grant/) <!--hide-->
``` sql
KILL MUTATION [ON CLUSTER cluster]
    ...
    [FORMAT format]
```

Tries to cancel and remove [mutations](alter.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query.

A test query (`TEST`) only checks the user's rights and displays a list of queries to stop.
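
For example, a sketch that cancels all mutations of one hypothetical table, filtering on `system.mutations` columns:

``` sql
-- Dry run: check rights and list the mutations that would be stopped
KILL MUTATION WHERE database = 'default' AND table = 'visits' TEST;
-- Actually cancel them
KILL MUTATION WHERE database = 'default' AND table = 'visits';
```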