Merge remote-tracking branch 'rschu1ze/master' into clang-tidy-reenable-checks

commit 2d750e9e2f
@ -289,3 +289,13 @@ inline void writeBinByte(UInt8 byte, void * out)
{
    memcpy(out, &impl::bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
}

/// Converts byte array to a hex string. Useful for debug logging.
inline std::string hexString(const void * data, size_t size)
{
    const char * p = reinterpret_cast<const char *>(data);
    std::string s(size * 2, '\0');
    for (size_t i = 0; i < size; ++i)
        writeHexByteLowercase(p[i], s.data() + i * 2);
    return s;
}
@ -75,9 +75,11 @@ endif ()

if (LINKER_NAME)
    message(STATUS "Using linker: ${LINKER_NAME}")
else()
elseif (NOT ARCH_S390X AND NOT OS_FREEBSD)
    message (FATAL_ERROR "The only supported linker is LLVM's LLD, but we cannot find it.")
else ()
    message(STATUS "Using linker: <default>")
endif()
endif ()

# Archiver

@ -56,7 +56,7 @@ ccache_status
ccache --zero-stats ||:

# Check whether the directory with pre-build scripts exists and not empty.
if [ -d "/build/packages/pre-build" ] && [ -z "$(ls -A /build/packages/pre-build)" ]; then
if [ ! -d "/build/packages/pre-build" ] || [ -n "$(ls -A /build/packages/pre-build)" ]; then
    echo "There are no subcommands to execute :)"
else
    # Execute all commands
@ -7,28 +7,34 @@ Contains information about normal and aggregate functions.

Columns:

- `name`(`String`) – The name of the function.
- `is_aggregate`(`UInt8`) — Whether the function is aggregate.
- `name` ([String](../../sql-reference/data-types/string.md)) – The name of the function.
- `is_aggregate` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Whether the function is an aggregate function.
- `is_deterministic` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md))) - Whether the function is deterministic.
- `case_insensitive` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Whether the function name can be used case-insensitively.
- `alias_to` ([String](../../sql-reference/data-types/string.md)) - The original function name, if the function name is an alias.
- `create_query` ([String](../../sql-reference/data-types/string.md)) - Unused.
- `origin` ([Enum8](../../sql-reference/data-types/enum.md)) - Unused.
- `description` ([String](../../sql-reference/data-types/string.md)) - A high-level description of what the function does.
- `syntax` ([String](../../sql-reference/data-types/string.md)) - Signature of the function.
- `arguments` ([String](../../sql-reference/data-types/string.md)) - Which arguments the function takes.
- `returned_value` ([String](../../sql-reference/data-types/string.md)) - What the function returns.
- `examples` ([String](../../sql-reference/data-types/string.md)) - Example usage of the function.
- `categories` ([String](../../sql-reference/data-types/string.md)) - The category of the function.

**Example**

```sql
SELECT * FROM system.functions LIMIT 10;
SELECT name, is_aggregate, is_deterministic, case_insensitive, alias_to FROM system.functions LIMIT 5;
```

```text
┌─name──────────────────┬─is_aggregate─┬─case_insensitive─┬─alias_to─┬─create_query─┬─origin─┐
│ logTrace              │            0 │                0 │          │              │ System │
│ aes_decrypt_mysql     │            0 │                0 │          │              │ System │
│ aes_encrypt_mysql     │            0 │                0 │          │              │ System │
│ decrypt               │            0 │                0 │          │              │ System │
│ encrypt               │            0 │                0 │          │              │ System │
│ toBool                │            0 │                0 │          │              │ System │
│ windowID              │            0 │                0 │          │              │ System │
│ hopStart              │            0 │                0 │          │              │ System │
│ hop                   │            0 │                0 │          │              │ System │
│ snowflakeToDateTime64 │            0 │                0 │          │              │ System │
└───────────────────────┴──────────────┴──────────────────┴──────────┴──────────────┴────────┘
┌─name─────────────────────┬─is_aggregate─┬─is_deterministic─┬─case_insensitive─┬─alias_to─┐
│ BLAKE3                   │            0 │                1 │                0 │          │
│ sipHash128Reference      │            0 │                1 │                0 │          │
│ mapExtractKeyLike        │            0 │                1 │                0 │          │
│ sipHash128ReferenceKeyed │            0 │                1 │                0 │          │
│ mapPartialSort           │            0 │                1 │                0 │          │
└──────────────────────────┴──────────────┴──────────────────┴──────────────────┴──────────┘

10 rows in set. Elapsed: 0.002 sec.
5 rows in set. Elapsed: 0.002 sec.
```
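The commit also adds self-documentation columns to this table; a minimal illustrative lookup of a single function's metadata (the chosen name `hex` is only an example):

```sql
-- Illustrative: read the new documentation columns for one function.
SELECT name, is_deterministic, description, syntax
FROM system.functions
WHERE name = 'hex';
```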
@ -198,8 +198,10 @@ Columns:
    - `FOREIGN TABLE`
    - `LOCAL TEMPORARY`
    - `SYSTEM VIEW`
- `data_length` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The size of the data on-disk. NULL if it could not be determined.
- `table_collation` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — The table default collation. Always `utf8mb4_0900_ai_ci`.
- `table_comment` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — The comment used when creating the table.
- `table_collation` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — The table default collation. Always `utf8mb4`.

**Example**

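A minimal sketch of a query touching the new columns (the table name `t` is a placeholder, matching the test tables used later in this commit):

```sql
-- Illustrative: the new data_length and table_collation columns.
SELECT table_name, data_length, table_collation, table_comment
FROM information_schema.tables
WHERE table_name = 't';
```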
@ -291,7 +293,7 @@ is_trigger_deletable: NO
is_trigger_insertable_into: NO
```

## KEY_COLUMN_USAGE (#key_column_usage)
## KEY_COLUMN_USAGE {#key_column_usage}

Contains columns from the [system.tables](../../operations/system-tables/tables.md) system table which are restricted by constraints.

|
||||
referenced_column_name: ᴺᵁᴸᴸ
|
||||
```
|
||||
|
||||
## REFERENTIAL_CONSTRAINTS (#referential_constraints)
|
||||
## REFERENTIAL_CONSTRAINTS {#referential_constraints}
|
||||
|
||||
Contains information about foreign keys. Currently returns an empty result (no rows) which is just enough to provide compatibility with 3rd party tools like Tableau Online.
|
||||
|
||||
|
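Because the view is a compatibility stub, a simple count is expected to return zero (illustrative):

```sql
-- Illustrative: the view exists only for tool compatibility and holds no rows.
SELECT count() FROM information_schema.referential_constraints;
```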
@ -684,12 +684,12 @@ Like [hasSubsequenceUTF8](#hasSubsequenceUTF8) but searches case-insensitively.

## byteHammingDistance

Calculates the hamming distance between two byte strings.
Calculates the [hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings.

**Syntax**

```sql
byteHammingDistance(haystack, needle)
byteHammingDistance(string1, string2)
```

**Examples**
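The example block is cut off by the hunk boundary; a minimal sketch of a call, with the result following directly from the definition above:

```sql
-- Illustrative: 'abc' and 'abd' differ in exactly one byte.
SELECT byteHammingDistance('abc', 'abd'); -- returns 1
```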
@ -710,12 +710,12 @@ Result:

## byteJaccardIndex

Calculates the jaccard similarity index between two byte strings.
Calculates the [Jaccard similarity index](https://en.wikipedia.org/wiki/Jaccard_index) between two byte strings.

**Syntax**

```sql
byteJaccardIndex(haystack, needle)
byteJaccardIndex(string1, string2)
```

**Examples**
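As above, the examples are elided by the hunk boundary; an illustrative call:

```sql
-- Illustrative: the byte sets share {o, u, s, e}; 4 common bytes over 10 distinct bytes = 0.4.
SELECT byteJaccardIndex('clickhouse', 'mouse'); -- returns 0.4
```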
@ -734,12 +734,12 @@ Result:

## byteEditDistance

Calculates the edit distance between two byte strings.
Calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two byte strings.

**Syntax**

```sql
byteEidtDistance(haystack, needle)
byteEditDistance(string1, string2)
```

**Examples**
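Again the example block is elided; a minimal sketch:

```sql
-- Illustrative: five deletions plus one substitution turn 'clickhouse' into 'mouse'.
SELECT byteEditDistance('clickhouse', 'mouse'); -- returns 6
```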
@ -760,27 +760,6 @@
    </graphite>
    -->

    <!-- Serve endpoint for Prometheus monitoring. -->
    <!--
        endpoint - mertics path (relative to root, statring with "/")
        port - port to setup server. If not defined or 0 than http_port used
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
        status_info - send data from different component from CH, ex: Dictionaries status
    -->
    <!--
    <prometheus>
        <endpoint>/metrics</endpoint>
        <port>9363</port>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
        <status_info>true</status_info>
    </prometheus>
    -->

    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data. If table is not exist, it will be created.
@ -638,7 +638,6 @@ default_session_timeout: 60
# metrics - send data from table system.metrics
# events - send data from table system.events
# asynchronous_metrics - send data from table system.asynchronous_metrics
# status_info - send data from different component from CH, ex: Dictionaries status

# prometheus:
# endpoint: /metrics
@ -647,7 +646,6 @@ default_session_timeout: 60
# metrics: true
# events: true
# asynchronous_metrics: true
# status_info: true

# Query log. Used only for queries with setting log_queries = 1.
query_log:
@ -760,27 +760,6 @@
    </graphite>
    -->

    <!-- Serve endpoint for Prometheus monitoring. -->
    <!--
        endpoint - mertics path (relative to root, statring with "/")
        port - port to setup server. If not defined or 0 than http_port used
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
        status_info - send data from different component from CH, ex: Dictionaries status
    -->
    <!--
    <prometheus>
        <endpoint>/metrics</endpoint>
        <port>9363</port>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
        <status_info>true</status_info>
    </prometheus>
    -->

    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data. If table is not exist, it will be created.
@ -969,7 +969,6 @@
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
        status_info - send data from different component from CH, ex: Dictionaries status
    -->
    <!--
    <prometheus>
@ -979,7 +978,6 @@
        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
        <status_info>true</status_info>
    </prometheus>
    -->

@ -639,7 +639,6 @@ default_session_timeout: 60
# metrics - send data from table system.metrics
# events - send data from table system.events
# asynchronous_metrics - send data from table system.asynchronous_metrics
# status_info - send data from different component from CH, ex: Dictionaries status

# prometheus:
# endpoint: /metrics
@ -648,7 +647,6 @@ default_session_timeout: 60
# metrics: true
# events: true
# asynchronous_metrics: true
# status_info: true

# Query log. Used only for queries with setting log_queries = 1.
query_log:
@ -1,10 +1,13 @@
#include <Client/MultiplexedConnections.h>

#include <Common/thread_local_rng.h>
#include <Common/logger_useful.h>
#include <Core/Protocol.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/Operators.h>
#include <Interpreters/ClientInfo.h>
#include <base/getThreadId.h>
#include <base/hex.h>

namespace DB
{
@ -19,6 +22,13 @@ namespace ErrorCodes
}


#define MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION \
    mutex_last_locked_by.store((getThreadId() << 32) | __LINE__); \
    memcpy(mutex_memory_dump.data(), &cancel_mutex, mutex_memory_dump.size()); \
    mutex_locked += 1; \
    SCOPE_EXIT({ mutex_locked -= 1; });


MultiplexedConnections::MultiplexedConnections(Connection & connection, const Settings & settings_, const ThrottlerPtr & throttler)
    : settings(settings_)
{
@ -73,6 +83,7 @@ MultiplexedConnections::MultiplexedConnections(
void MultiplexedConnections::sendScalarsData(Scalars & data)
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION

    if (!sent_query)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send scalars data: query not yet sent.");
@ -88,6 +99,7 @@ void MultiplexedConnections::sendScalarsData(Scalars & data)
void MultiplexedConnections::sendExternalTablesData(std::vector<ExternalTablesData> & data)
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION

    if (!sent_query)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send external tables data: query not yet sent.");
@ -116,6 +128,7 @@ void MultiplexedConnections::sendQuery(
    bool with_pending_data)
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION

    if (sent_query)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Query already sent.");
@ -173,6 +186,7 @@ void MultiplexedConnections::sendQuery(
void MultiplexedConnections::sendIgnoredPartUUIDs(const std::vector<UUID> & uuids)
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION

    if (sent_query)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send uuids after query is sent.");
@ -189,6 +203,7 @@ void MultiplexedConnections::sendIgnoredPartUUIDs(const std::vector<UUID> & uuid
void MultiplexedConnections::sendReadTaskResponse(const String & response)
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION
    if (cancelled)
        return;
    current_connection->sendReadTaskResponse(response);
@ -198,6 +213,7 @@ void MultiplexedConnections::sendReadTaskResponse(const String & response)
void MultiplexedConnections::sendMergeTreeReadTaskResponse(const ParallelReadResponse & response)
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION
    if (cancelled)
        return;
    current_connection->sendMergeTreeReadTaskResponse(response);
@ -207,13 +223,29 @@ void MultiplexedConnections::sendMergeTreeReadTaskResponse(const ParallelReadRes
Packet MultiplexedConnections::receivePacket()
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION
    Packet packet = receivePacketUnlocked({});
    return packet;
}

void MultiplexedConnections::disconnect()
{
    std::lock_guard lock(cancel_mutex);
    /// We've seen this lock mysteriously get stuck forever, without any other thread seeming to
    /// hold the mutex. This is temporary code to print some extra information next time it happens.
    /// std::lock_guard lock(cancel_mutex);
    if (!cancel_mutex.try_lock_for(std::chrono::hours(1)))
    {
        UInt64 last_locked = mutex_last_locked_by.load();
        std::array<UInt8, sizeof(std::timed_mutex)> new_memory_dump;
        memcpy(new_memory_dump.data(), &cancel_mutex, new_memory_dump.size());
        LOG_ERROR(&Poco::Logger::get("MultiplexedConnections"), "Deadlock in MultiplexedConnections::disconnect()! Mutex was last (instrumentedly) locked by thread {} on line {}, lock balance: {}, mutex memory when last locked: {}, mutex memory now: {}", last_locked >> 32, last_locked & 0xffffffff, mutex_locked.load(), hexString(mutex_memory_dump.data(), mutex_memory_dump.size()), hexString(new_memory_dump.data(), new_memory_dump.size()));
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Deadlock in MultiplexedConnections::disconnect()");
    }
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wthread-safety-analysis"
    std::lock_guard lock(cancel_mutex, std::adopt_lock);
#pragma clang diagnostic pop
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION

    for (ReplicaState & state : replica_states)
    {
@ -229,6 +261,7 @@ void MultiplexedConnections::disconnect()
void MultiplexedConnections::sendCancel()
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION

    if (!sent_query || cancelled)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot cancel. Either no query sent or already cancelled.");
@ -246,6 +279,7 @@ void MultiplexedConnections::sendCancel()
Packet MultiplexedConnections::drain()
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION

    if (!cancelled)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot drain connections: cancel first.");
@ -286,6 +320,7 @@ Packet MultiplexedConnections::drain()
std::string MultiplexedConnections::dumpAddresses() const
{
    std::lock_guard lock(cancel_mutex);
    MUTEX_LOCK_TEMPORARY_DEBUG_INSTRUMENTATION
    return dumpAddressesUnlocked();
}

@ -105,9 +105,15 @@ private:
    /// std::nullopt if parallel reading from replicas is not used
    std::optional<ReplicaInfo> replica_info;

    /// A mutex for the sendCancel function to execute safely
    /// in separate thread.
    mutable std::mutex cancel_mutex;
    /// A mutex for the sendCancel function to execute safely in separate thread.
    mutable std::timed_mutex cancel_mutex;

    /// Temporary instrumentation to debug a weird deadlock on cancel_mutex.
    /// TODO: Once the investigation is done, get rid of these, and of INSTRUMENTED_LOCK_MUTEX, and
    /// change cancel_mutex to std::mutex.
    mutable std::atomic<UInt64> mutex_last_locked_by{0};
    mutable std::atomic<Int64> mutex_locked{0};
    mutable std::array<UInt8, sizeof(std::timed_mutex)> mutex_memory_dump;

    friend struct RemoteQueryExecutorRoutine;
};
@ -211,6 +211,7 @@
    \
    M(RegexpCreated, "Compiled regular expressions. Identical regular expressions compiled just once and cached forever.") \
    M(ContextLock, "Number of times the lock of Context was acquired or tried to acquire. This is global lock.") \
    M(ContextLockWaitMicroseconds, "Context lock wait time in microseconds") \
    \
    M(StorageBufferFlush, "Number of times a buffer in a 'Buffer' table was flushed.") \
    M(StorageBufferErrorOnFlush, "Number of times a buffer in the 'Buffer' table has not been able to flush due to error writing in the destination table.") \
@ -1,57 +0,0 @@
#include <Common/StatusInfo.h>
#include <Common/ExternalLoaderStatus.h>

/// Available status. Add something here as you wish.
#define APPLY_FOR_STATUS(M) \
    M(DictionaryStatus, "Dictionary Status.", DB::getStatusEnumAllPossibleValues()) \


namespace CurrentStatusInfo
{
    #define M(NAME, DOCUMENTATION, ENUM) extern const Status NAME = Status(__COUNTER__);
        APPLY_FOR_STATUS(M)
    #undef M
    constexpr Status END = Status(__COUNTER__);

    std::mutex locks[END] {};
    std::unordered_map<String, Int8> values[END] {};

    const char * getName(Status event)
    {
        static const char * strings[] =
        {
        #define M(NAME, DOCUMENTATION, ENUM) #NAME,
            APPLY_FOR_STATUS(M)
        #undef M
        };

        return strings[event];
    }

    const char * getDocumentation(Status event)
    {
        static const char * strings[] =
        {
        #define M(NAME, DOCUMENTATION, ENUM) #DOCUMENTATION,
            APPLY_FOR_STATUS(M)
        #undef M
        };

        return strings[event];
    }

    const std::vector<std::pair<String, Int8>> & getAllPossibleValues(Status event)
    {
        static const std::vector<std::pair<String, Int8>> enum_values [] =
        {
        #define M(NAME, DOCUMENTATION, ENUM) ENUM,
            APPLY_FOR_STATUS(M)
        #undef M
        };
        return enum_values[event];
    }

    Status end() { return END; }
}

#undef APPLY_FOR_STATUS
@ -1,39 +0,0 @@
#pragma once

#include <stddef.h>
#include <cstdint>
#include <utility>
#include <atomic>
#include <vector>
#include <base/types.h>
#include <base/strong_typedef.h>
#include <mutex>
#include <unordered_map>


namespace CurrentStatusInfo
{
    using Status = StrongTypedef<size_t, struct StatusTag>;
    using Key = std::string;

    const char * getName(Status event);
    const char * getDocumentation(Status event);
    const std::vector<std::pair<String, Int8>> & getAllPossibleValues(Status event);

    extern std::unordered_map<String, Int8> values[];
    extern std::mutex locks[];

    Status end();

    inline void set(Status status, Key key, Int8 value)
    {
        std::lock_guard lock(locks[status]);
        values[status][key] = value;
    }

    inline void unset(Status status, Key key)
    {
        std::lock_guard lock(locks[status]);
        values[status].erase(key);
    }
}
@ -149,7 +149,7 @@ class IColumn;
    M(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \
    M(Bool, compile_expressions, false, "Compile some scalar functions and operators to native code.", 0) \
    M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
    M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code. This feature has a bug and should not be used.", 0) \
    M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \
    M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \
    M(Bool, compile_sort_description, true, "Compile sort description to native code.", 0) \
    M(UInt64, min_count_to_compile_sort_description, 3, "The number of identical sort descriptions before they are JIT-compiled", 0) \
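The deprecation note is dropped from `compile_aggregate_expressions`, so the setting is usable again; an illustrative way to inspect it on a running server:

```sql
-- Illustrative: check the current value and description of the setting.
SELECT name, value, description
FROM system.settings
WHERE name = 'compile_aggregate_expressions';
```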
@ -18,7 +18,7 @@ extern const int TOO_LARGE_STRING_SIZE;
}

template <typename Op>
struct FunctionDistanceImpl
struct FunctionStringDistanceImpl
{
    using ResultType = typename Op::ResultType;

@ -195,12 +195,11 @@ struct NameByteEditDistance
    static constexpr auto name = "byteEditDistance";
};

using FunctionByteHammingDistance
    = FunctionsStringSimilarity<FunctionDistanceImpl<ByteHammingDistanceImpl>, NameByteHammingDistance>;
using FunctionByteHammingDistance = FunctionsStringSimilarity<FunctionStringDistanceImpl<ByteHammingDistanceImpl>, NameByteHammingDistance>;

using FunctionByteJaccardIndex = FunctionsStringSimilarity<FunctionDistanceImpl<ByteJaccardIndexImpl>, NameByteJaccardIndex>;
using FunctionByteJaccardIndex = FunctionsStringSimilarity<FunctionStringDistanceImpl<ByteJaccardIndexImpl>, NameByteJaccardIndex>;

using FunctionByteEditDistance = FunctionsStringSimilarity<FunctionDistanceImpl<ByteEditDistanceImpl>, NameByteEditDistance>;
using FunctionByteEditDistance = FunctionsStringSimilarity<FunctionStringDistanceImpl<ByteEditDistanceImpl>, NameByteEditDistance>;

REGISTER_FUNCTION(StringHammingDistance)
{
@ -1096,22 +1096,25 @@ public:
        return res != nullptr;
    };

    TypeIndex left_id = arg_then.type->getTypeId();
    TypeIndex right_id = arg_else.type->getTypeId();
    DataTypePtr left_type = arg_then.type;
    DataTypePtr right_type = arg_else.type;

    if (const auto * left_array = checkAndGetDataType<DataTypeArray>(arg_then.type.get()))
        left_id = left_array->getNestedType()->getTypeId();
        left_type = left_array->getNestedType();

    if (const auto * right_array = checkAndGetDataType<DataTypeArray>(arg_else.type.get()))
        right_id = right_array->getNestedType()->getTypeId();
        right_type = right_array->getNestedType();

    /// Special case when one column is Integer and another is UInt64 that can be actually Int64.
    /// The result type for this case is Int64 and we need to change UInt64 type to Int64
    /// so the NumberTraits::ResultOfIf will return Int64 instead if Int128.
    if (isNativeInteger(arg_then.type) && isUInt64ThatCanBeInt64(arg_else.type))
        right_id = TypeIndex::Int64;
    else if (isNativeInteger(arg_else.type) && isUInt64ThatCanBeInt64(arg_then.type))
        left_id = TypeIndex::Int64;
    if (isNativeInteger(left_type) && isUInt64ThatCanBeInt64(right_type))
        right_type = std::make_shared<DataTypeInt64>();
    else if (isNativeInteger(right_type) && isUInt64ThatCanBeInt64(left_type))
        left_type = std::make_shared<DataTypeInt64>();

    TypeIndex left_id = left_type->getTypeId();
    TypeIndex right_id = right_type->getTypeId();

    if (!(callOnBasicTypes<true, true, true, false>(left_id, right_id, call)
        || (res = executeTyped<UUID, UUID>(cond_col, arguments, result_type, input_rows_count))
@ -108,6 +108,7 @@ namespace fs = std::filesystem;
namespace ProfileEvents
{
    extern const Event ContextLock;
    extern const Event ContextLockWaitMicroseconds;
}

namespace CurrentMetrics
@ -704,7 +705,10 @@ std::unique_lock<std::recursive_mutex> Context::getLock() const
{
    ProfileEvents::increment(ProfileEvents::ContextLock);
    CurrentMetrics::Increment increment{CurrentMetrics::ContextLockWait};
    return std::unique_lock(shared->mutex);
    Stopwatch watch;
    auto lock = std::unique_lock(shared->mutex);
    ProfileEvents::increment(ProfileEvents::ContextLockWaitMicroseconds, watch.elapsedMicroseconds());
    return lock;
}

ProcessList & Context::getProcessList() { return shared->process_list; }
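With the Stopwatch in place, the accumulated wait time becomes visible next to the existing lock counter; an illustrative query against a running server:

```sql
-- Illustrative: acquisitions vs. total time spent waiting for the Context lock.
SELECT event, value
FROM system.events
WHERE event IN ('ContextLock', 'ContextLockWaitMicroseconds');
```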
@ -1,7 +1,6 @@
#include "ExternalLoader.h"

#include <mutex>
#include <pcg_random.hpp>
#include <Common/MemoryTrackerBlockerInThread.h>
#include <Common/Config/AbstractConfigurationComparison.h>
#include <Common/Exception.h>
@ -9,7 +8,6 @@
#include <Common/ThreadPool.h>
#include <Common/randomSeed.h>
#include <Common/setThreadName.h>
#include <Common/StatusInfo.h>
#include <Common/scope_guard_safe.h>
#include <Common/logger_useful.h>
#include <base/chrono_io.h>
@ -18,12 +16,6 @@
#include <unordered_set>


namespace CurrentStatusInfo
{
    extern const Status DictionaryStatus;
}


namespace DB
{
namespace ErrorCodes
@ -1145,7 +1137,6 @@ private:
        if (info && (info->loading_id == loading_id))
        {
            info->loading_id = info->state_id;
            CurrentStatusInfo::set(CurrentStatusInfo::DictionaryStatus, name, static_cast<Int8>(info->status()));
        }
        min_id_to_finish_loading_dependencies.erase(std::this_thread::get_id());

@ -1307,7 +1298,6 @@ scope_guard ExternalLoader::addConfigRepository(std::unique_ptr<IExternalLoaderC
    return [this, ptr, name]()
    {
        config_files_reader->removeConfigRepository(ptr);
        CurrentStatusInfo::unset(CurrentStatusInfo::DictionaryStatus, name);
        reloadConfig(name);
    };
}
@ -24,6 +24,7 @@ public:
        size_t max_block_size_rows_,
        size_t max_block_size_bytes_);

    const char * getName() const override { return "AggregatingSortedAlgorithm"; }
    void initialize(Inputs inputs) override;
    void consume(Input & input, size_t source_num) override;
    Status merge() override;
@ -38,6 +38,7 @@ public:
        WriteBuffer * out_row_sources_buf_ = nullptr,
        bool use_average_block_sizes = false);

    const char * getName() const override { return "CollapsingSortedAlgorithm"; }
    Status merge() override;

private:
@ -45,6 +45,7 @@ public:
        size_t max_block_size_rows_,
        size_t max_block_size_bytes_);

    const char * getName() const override { return "FinishAggregatingInOrderAlgorithm"; }
    void initialize(Inputs inputs) override;
    void consume(Input & input, size_t source_num) override;
    Status merge() override;
@ -30,6 +30,7 @@ public:
        Graphite::Params params_,
        time_t time_of_merge_);

    const char * getName() const override { return "GraphiteRollupSortedAlgorithm"; }
    Status merge() override;

    struct ColumnsDefinition
@ -47,6 +47,7 @@ public:

    using Inputs = std::vector<Input>;

    virtual const char * getName() const = 0;
    virtual void initialize(Inputs inputs) = 0;
    virtual void consume(Input & input, size_t source_num) = 0;
    virtual Status merge() = 0;
@ -26,6 +26,7 @@ public:

    void addInput();

    const char * getName() const override { return "MergingSortedAlgorithm"; }
    void initialize(Inputs inputs) override;
    void consume(Input & input, size_t source_num) override;
    Status merge() override;
@ -30,6 +30,7 @@ public:
        bool cleanup = false,
        size_t * cleanedup_rows_count = nullptr);

    const char * getName() const override { return "ReplacingSortedAlgorithm"; }
    Status merge() override;

private:
@ -25,6 +25,7 @@ public:
        size_t max_block_size_rows,
        size_t max_block_size_bytes);

    const char * getName() const override { return "SummingSortedAlgorithm"; }
    void initialize(Inputs inputs) override;
    void consume(Input & input, size_t source_num) override;
    Status merge() override;
@ -25,6 +25,7 @@ public:
        WriteBuffer * out_row_sources_buf_ = nullptr,
        bool use_average_block_sizes = false);

    const char * getName() const override { return "VersionedCollapsingAlgorithm"; }
    Status merge() override;

private:
@ -59,6 +59,7 @@ class ColumnGathererStream final : public IMergingAlgorithm
public:
    ColumnGathererStream(size_t num_inputs, ReadBuffer & row_sources_buf_, size_t block_preferred_size_ = DEFAULT_BLOCK_SIZE);

    const char * getName() const override { return "ColumnGathererStream"; }
    void initialize(Inputs inputs) override;
    void consume(Input & input, size_t source_num) override;
    Status merge() override;
@ -17,9 +17,25 @@ namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int SET_SIZE_LIMIT_EXCEEDED;
    extern const int UNKNOWN_EXCEPTION;
}

CreatingSetsTransform::~CreatingSetsTransform() = default;
CreatingSetsTransform::~CreatingSetsTransform()
{
    if (promise_to_build)
    {
        /// set_exception can also throw
        try
        {
            promise_to_build->set_exception(std::make_exception_ptr(
                Exception(ErrorCodes::UNKNOWN_EXCEPTION, "Failed to build set, most likely pipeline executor was stopped")));
        }
        catch (...)
        {
            tryLogCurrentException(log, "Failed to set_exception for promise");
        }
    }
}

CreatingSetsTransform::CreatingSetsTransform(
    Block in_header_,
@ -86,7 +102,7 @@ void CreatingSetsTransform::startSubquery()
    }
    else
    {
        LOG_TRACE(log, "Waiting for set to be build by another thread, key: {}", set_and_key->key);
        LOG_TRACE(log, "Waiting for set to be built by another thread, key: {}", set_and_key->key);
        SharedSet set_built_by_another_thread = std::move(std::get<1>(from_cache));
        const SetPtr & ready_set = set_built_by_another_thread.get();
        if (!ready_set)
@ -188,7 +204,10 @@ Chunk CreatingSetsTransform::generate()
    {
        set_and_key->set->finishInsert();
        if (promise_to_build)
        {
            promise_to_build->set_value(set_and_key->set);
            promise_to_build.reset();
        }
    }

    if (table_out.initialized())
@ -229,6 +229,7 @@ class MergeJoinAlgorithm final : public IMergingAlgorithm
public:
    explicit MergeJoinAlgorithm(JoinPtr table_join, const Blocks & input_headers, size_t max_block_size_);

    const char * getName() const override { return "MergeJoinAlgorithm"; }
    virtual void initialize(Inputs inputs) override;
    virtual void consume(Input & input, size_t source_num) override;
    virtual Status merge() override;
@ -1,7 +1,6 @@
#include "PrometheusMetricsWriter.h"

#include <IO/WriteHelpers.h>
#include <Common/StatusInfo.h>
#include <regex> /// TODO: this library is harmful.
#include <algorithm>

@ -51,7 +50,6 @@ PrometheusMetricsWriter::PrometheusMetricsWriter(
    , send_events(config.getBool(config_name + ".events", true))
    , send_metrics(config.getBool(config_name + ".metrics", true))
    , send_asynchronous_metrics(config.getBool(config_name + ".asynchronous_metrics", true))
    , send_status_info(config.getBool(config_name + ".status_info", true))
{
}

@ -120,42 +118,6 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const
            writeOutLine(wb, key, value.value);
        }
    }

    if (send_status_info)
    {
        for (size_t i = 0, end = CurrentStatusInfo::end(); i < end; ++i)
        {
            std::lock_guard lock(CurrentStatusInfo::locks[static_cast<CurrentStatusInfo::Status>(i)]);
            std::string metric_name{CurrentStatusInfo::getName(static_cast<CurrentStatusInfo::Status>(i))};
            std::string metric_doc{CurrentStatusInfo::getDocumentation(static_cast<CurrentStatusInfo::Status>(i))};

            convertHelpToSingleLine(metric_doc);

            if (!replaceInvalidChars(metric_name))
                continue;
            std::string key{current_status_prefix + metric_name};

            writeOutLine(wb, "# HELP", key, metric_doc);
            writeOutLine(wb, "# TYPE", key, "gauge");

            for (const auto & value: CurrentStatusInfo::values[i])
            {
                for (const auto & enum_value: CurrentStatusInfo::getAllPossibleValues(static_cast<CurrentStatusInfo::Status>(i)))
                {
                    DB::writeText(key, wb);
                    DB::writeChar('{', wb);
                    DB::writeText(key, wb);
                    DB::writeChar('=', wb);
                    writeDoubleQuotedString(enum_value.first, wb);
                    DB::writeText(",name=", wb);
                    writeDoubleQuotedString(value.first, wb);
                    DB::writeText("} ", wb);
                    DB::writeText(value.second == enum_value.second, wb);
                    DB::writeChar('\n', wb);
                }
            }
        }
    }
}

}
@ -27,12 +27,10 @@ private:
    const bool send_events;
    const bool send_metrics;
    const bool send_asynchronous_metrics;
    const bool send_status_info;

    static inline constexpr auto profile_events_prefix = "ClickHouseProfileEvents_";
    static inline constexpr auto current_metrics_prefix = "ClickHouseMetrics_";
    static inline constexpr auto asynchronous_metrics_prefix = "ClickHouseAsyncMetrics_";
    static inline constexpr auto current_status_prefix = "ClickHouseStatusInfo_";
};

}
@ -365,7 +365,7 @@ Block MergeTreeDataWriter::mergeBlock(

    /// Check that after first merge merging_algorithm is waiting for data from input 0.
    if (status.required_source != 0)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: required source after the first merge is not 0.");
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: required source after the first merge is not 0. Chunk rows: {}, is_finished: {}, required_source: {}, algorithm: {}", status.chunk.getNumRows(), status.is_finished, status.required_source, merging_algorithm->getName());

    status = merging_algorithm->merge();

@ -1,5 +1,6 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeEnum.h>
@ -15,6 +16,15 @@
namespace DB
{

namespace ErrorCodes
{
    extern const int DICTIONARIES_WAS_NOT_LOADED;
    extern const int FUNCTION_NOT_ALLOWED;
    extern const int LOGICAL_ERROR;
    extern const int NOT_IMPLEMENTED;
    extern const int SUPPORT_IS_DISABLED;
};

enum class FunctionOrigin : Int8
{
    SYSTEM = 0,
@ -29,6 +39,7 @@ namespace
        MutableColumns & res_columns,
        const String & name,
        UInt64 is_aggregate,
        std::optional<UInt64> is_deterministic,
        const String & create_query,
        FunctionOrigin function_origin,
        const Factory & factory)
@ -36,53 +47,58 @@
        res_columns[0]->insert(name);
        res_columns[1]->insert(is_aggregate);

        if (!is_deterministic.has_value())
            res_columns[2]->insertDefault();
        else
            res_columns[2]->insert(*is_deterministic);

        if constexpr (std::is_same_v<Factory, UserDefinedSQLFunctionFactory> || std::is_same_v<Factory, UserDefinedExecutableFunctionFactory>)
        {
            res_columns[2]->insert(false);
            res_columns[3]->insertDefault();
            res_columns[3]->insert(false);
            res_columns[4]->insertDefault();
        }
        else
        {
            res_columns[2]->insert(factory.isCaseInsensitive(name));
            res_columns[3]->insert(factory.isCaseInsensitive(name));
            if (factory.isAlias(name))
                res_columns[3]->insert(factory.aliasTo(name));
                res_columns[4]->insert(factory.aliasTo(name));
            else
                res_columns[3]->insertDefault();
                res_columns[4]->insertDefault();
        }

        res_columns[4]->insert(create_query);
        res_columns[5]->insert(static_cast<Int8>(function_origin));
        res_columns[5]->insert(create_query);
        res_columns[6]->insert(static_cast<Int8>(function_origin));

        if constexpr (std::is_same_v<Factory, FunctionFactory>)
        {
            if (factory.isAlias(name))
            {
                res_columns[6]->insertDefault();
                res_columns[7]->insertDefault();
                res_columns[8]->insertDefault();
                res_columns[9]->insertDefault();
                res_columns[10]->insertDefault();
                res_columns[11]->insertDefault();
                res_columns[12]->insertDefault();
            }
            else
            {
                auto documentation = factory.getDocumentation(name);
                res_columns[6]->insert(documentation.description);
                res_columns[7]->insert(documentation.syntax);
                res_columns[8]->insert(documentation.argumentsAsString());
                res_columns[9]->insert(documentation.returned_value);
                res_columns[10]->insert(documentation.examplesAsString());
                res_columns[11]->insert(documentation.categoriesAsString());
                res_columns[7]->insert(documentation.description);
                res_columns[8]->insert(documentation.syntax);
                res_columns[9]->insert(documentation.argumentsAsString());
                res_columns[10]->insert(documentation.returned_value);
                res_columns[11]->insert(documentation.examplesAsString());
                res_columns[12]->insert(documentation.categoriesAsString());
            }
        }
        else
        {
            res_columns[6]->insertDefault();
            res_columns[7]->insertDefault();
            res_columns[8]->insertDefault();
            res_columns[9]->insertDefault();
            res_columns[10]->insertDefault();
            res_columns[11]->insertDefault();
            res_columns[12]->insertDefault();
        }
    }
}
@ -102,6 +118,7 @@ NamesAndTypesList StorageSystemFunctions::getNamesAndTypes()
    return {
        {"name", std::make_shared<DataTypeString>()},
        {"is_aggregate", std::make_shared<DataTypeUInt8>()},
        {"is_deterministic", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeUInt8>())},
        {"case_insensitive", std::make_shared<DataTypeUInt8>()},
        {"alias_to", std::make_shared<DataTypeString>()},
        {"create_query", std::make_shared<DataTypeString>()},
@ -121,14 +138,34 @@ void StorageSystemFunctions::fillData(MutableColumns & res_columns, ContextPtr c
    const auto & function_names = functions_factory.getAllRegisteredNames();
    for (const auto & function_name : function_names)
    {
        fillRow(res_columns, function_name, UInt64(0), "", FunctionOrigin::SYSTEM, functions_factory);
        std::optional<UInt64> is_deterministic;
        try
        {
            is_deterministic = functions_factory.tryGet(function_name, context)->isDeterministic();
        }
        catch (const Exception & e)
        {
            /// Some functions throw because they need special configuration or setup before use.
            if (e.code() == ErrorCodes::DICTIONARIES_WAS_NOT_LOADED
                || e.code() == ErrorCodes::FUNCTION_NOT_ALLOWED
                || e.code() == ErrorCodes::LOGICAL_ERROR
                || e.code() == ErrorCodes::NOT_IMPLEMENTED
                || e.code() == ErrorCodes::SUPPORT_IS_DISABLED)
            {
                /// Ignore exception, show is_deterministic = NULL.
            }
            else
                throw;
        }

        fillRow(res_columns, function_name, 0, is_deterministic, "", FunctionOrigin::SYSTEM, functions_factory);
    }

    const auto & aggregate_functions_factory = AggregateFunctionFactory::instance();
    const auto & aggregate_function_names = aggregate_functions_factory.getAllRegisteredNames();
    for (const auto & function_name : aggregate_function_names)
    {
        fillRow(res_columns, function_name, UInt64(1), "", FunctionOrigin::SYSTEM, aggregate_functions_factory);
        fillRow(res_columns, function_name, 1, {1}, "", FunctionOrigin::SYSTEM, aggregate_functions_factory);
    }

    const auto & user_defined_sql_functions_factory = UserDefinedSQLFunctionFactory::instance();
@ -136,14 +173,14 @@ void StorageSystemFunctions::fillData(MutableColumns & res_columns, ContextPtr c
    for (const auto & function_name : user_defined_sql_functions_names)
    {
        auto create_query = queryToString(user_defined_sql_functions_factory.get(function_name));
        fillRow(res_columns, function_name, UInt64(0), create_query, FunctionOrigin::SQL_USER_DEFINED, user_defined_sql_functions_factory);
        fillRow(res_columns, function_name, 0, {0}, create_query, FunctionOrigin::SQL_USER_DEFINED, user_defined_sql_functions_factory);
    }

    const auto & user_defined_executable_functions_factory = UserDefinedExecutableFunctionFactory::instance();
    const auto & user_defined_executable_functions_names = user_defined_executable_functions_factory.getRegisteredNames(context);
    for (const auto & function_name : user_defined_executable_functions_names)
    {
        fillRow(res_columns, function_name, UInt64(0), "", FunctionOrigin::EXECUTABLE_USER_DEFINED, user_defined_executable_functions_factory);
        fillRow(res_columns, function_name, 0, {0}, "", FunctionOrigin::EXECUTABLE_USER_DEFINED, user_defined_executable_functions_factory);
    }
}

@ -61,12 +61,14 @@ static constexpr std::string_view tables = R"(
        `table_schema` String,
        `table_name` String,
        `table_type` String,
        `data_length` Nullable(UInt64),
        `table_collation` Nullable(String),
        `table_comment` Nullable(String),
        `TABLE_CATALOG` String,
        `TABLE_SCHEMA` String,
        `TABLE_NAME` String,
        `TABLE_TYPE` String,
        `DATA_LENGTH` Nullable(UInt64),
        `TABLE_COLLATION` Nullable(String),
        `TABLE_COMMENT` Nullable(String)
    ) AS
@ -80,12 +82,14 @@ static constexpr std::string_view tables = R"(
            has_own_data = 0, 'FOREIGN TABLE',
            'BASE TABLE'
        ) AS table_type,
        total_bytes AS data_length,
        'utf8mb4_0900_ai_ci' AS table_collation,
        comment AS table_comment,
        table_catalog AS TABLE_CATALOG,
        table_schema AS TABLE_SCHEMA,
        table_name AS TABLE_NAME,
        table_type AS TABLE_TYPE,
        data_length AS DATA_LENGTH,
        table_collation AS TABLE_COLLATION,
        table_comment AS TABLE_COMMENT
    FROM system.tables
@ -27,7 +27,7 @@ CONTAINER_NAME = f"{VOLUME_NAME}_{random_str()}"

CONFIG_DIR_IN_REPO = "programs/server"
INTEGRATION_DIR_IN_REPO = "tests/integration"
SRC_DIR_IN_REPO = "src"
UTILS_DIR_IN_REPO = "utils"

DIND_INTEGRATION_TESTS_IMAGE_NAME = "clickhouse/integration-tests-runner"

@ -89,12 +89,16 @@ def check_args_and_update_paths(args):
        )
        logging.info("Cases dir is not set. Will use %s" % (args.cases_dir))

    if args.src_dir:
        if not os.path.isabs(args.src_dir):
            args.src_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.src_dir))
    if args.utils_dir:
        if not os.path.isabs(args.utils_dir):
            args.utils_dir = os.path.abspath(
                os.path.join(CURRENT_WORK_DIR, args.utils_dir)
            )
    else:
        args.src_dir = os.path.abspath(os.path.join(CLICKHOUSE_ROOT, SRC_DIR_IN_REPO))
        logging.info("src dir is not set. Will use %s" % (args.src_dir))
        args.utils_dir = os.path.abspath(
            os.path.join(CLICKHOUSE_ROOT, UTILS_DIR_IN_REPO)
        )
        logging.info("utils dir is not set. Will use %s" % (args.utils_dir))

    logging.info(
        "base_configs_dir: {}, binary: {}, cases_dir: {} ".format(
@ -115,7 +119,7 @@ def check_args_and_update_paths(args):

    if args.dockerd_volume:
        if not os.path.isabs(args.dockerd_volume):
            args.src_dir = os.path.abspath(
            args.dockerd_volume = os.path.abspath(
                os.path.join(CURRENT_WORK_DIR, args.dockerd_volume)
            )

@ -197,9 +201,9 @@ if __name__ == "__main__":
    )

    parser.add_argument(
        "--src-dir",
        default=os.environ.get("CLICKHOUSE_SRC_DIR"),
        help="Path to the 'src' directory in repository. Used to provide schemas (e.g. *.proto) for some tests when those schemas are located in the 'src' directory",
        "--utils-dir",
        default=os.environ.get("CLICKHOUSE_UTILS_DIR"),
        help="Path to the 'utils' directory in repository. Used to provide Python modules for grpc protocol schemas which are located in the 'utils' directory",
    )

    parser.add_argument(
@ -424,7 +428,7 @@ if __name__ == "__main__":
        f"--volume={args.library_bridge_binary}:/clickhouse-library-bridge "
        f"--volume={args.base_configs_dir}:/clickhouse-config "
        f"--volume={args.cases_dir}:/ClickHouse/tests/integration "
        f"--volume={args.src_dir}/Server/grpc_protos:/ClickHouse/src/Server/grpc_protos "
        f"--volume={args.utils_dir}/grpc-client/pb2:/ClickHouse/utils/grpc-client/pb2 "
        f"--volume=/run:/run/host:ro {dockerd_internal_volume} {env_tags} {env_cleanup} "
        f"-e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 {use_analyzer} -e PYTHONUNBUFFERED=1 "
        f'-e PYTEST_ADDOPTS="{parallel_args} {pytest_opts} {tests_list} {rand_args} -vvv"'
@ -674,27 +674,6 @@
    </graphite>
    -->

    <!-- Serve endpoint for Prometheus monitoring. -->
    <!--
        endpoint - mertics path (relative to root, statring with "/")
        port - port to setup server. If not defined or 0 than http_port used
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
        status_info - send data from different component from CH, ex: Dictionaries status
    -->
    <!--
    <prometheus>
        <endpoint>/metrics</endpoint>
        <port>9363</port>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
        <status_info>true</status_info>
    </prometheus>
    -->

    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data. If table is not exist, it will be created.
tests/integration/test_grpc_protocol/pb2 (symbolic link)
@ -0,0 +1 @@
../../../utils/grpc-client/pb2
@ -1 +0,0 @@
../../../../src/Server/grpc_protos/clickhouse_grpc.proto
@ -10,32 +10,20 @@ from threading import Thread
import gzip
import lz4.frame

script_dir = os.path.dirname(os.path.realpath(__file__))
pb2_dir = os.path.join(script_dir, "pb2")
if pb2_dir not in sys.path:
    sys.path.append(pb2_dir)
import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc  # Execute pb2/generate.py to generate these modules.


GRPC_PORT = 9100
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"


# Use grpcio-tools to generate *pb2.py files from *.proto.

proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    "python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \
    {proto_dir}/clickhouse_grpc.proto".format(
        proto_dir=proto_dir, gen_dir=gen_dir
    ),
    shell=True,
)

sys.path.append(gen_dir)
import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc


# Utilities

config_dir = os.path.join(SCRIPT_DIR, "./configs")
config_dir = os.path.join(script_dir, "./configs")
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
tests/integration/test_grpc_protocol_ssl/pb2 (symbolic link)
@ -0,0 +1 @@
../../../utils/grpc-client/pb2
@ -1 +0,0 @@
../../../../src/Server/grpc_protos/clickhouse_grpc.proto
@ -4,35 +4,23 @@ import sys
import grpc
from helpers.cluster import ClickHouseCluster, run_and_check

script_dir = os.path.dirname(os.path.realpath(__file__))
pb2_dir = os.path.join(script_dir, "pb2")
if pb2_dir not in sys.path:
    sys.path.append(pb2_dir)
import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc  # Execute pb2/generate.py to generate these modules.


# The test cluster is configured with certificate for that host name, see 'server-ext.cnf'.
# The client have to verify server certificate against that name. Client uses SNI
SSL_HOST = "integration-tests.clickhouse.com"
GRPC_PORT = 9100
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"


# Use grpcio-tools to generate *pb2.py files from *.proto.

proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    "python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \
    {proto_dir}/clickhouse_grpc.proto".format(
        proto_dir=proto_dir, gen_dir=gen_dir
    ),
    shell=True,
)

sys.path.append(gen_dir)
import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc


# Utilities

config_dir = os.path.join(SCRIPT_DIR, "./configs")
config_dir = os.path.join(script_dir, "./configs")
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
@ -0,0 +1 @@
../../../utils/grpc-client/pb2
@ -1 +0,0 @@
../../../../src/Server/grpc_protos/clickhouse_grpc.proto
@ -9,9 +9,15 @@ import threading

from helpers.cluster import ClickHouseCluster, run_and_check
from helpers.test_tools import assert_logs_contain_with_retry

from helpers.uclient import client, prompt

script_dir = os.path.dirname(os.path.realpath(__file__))
grpc_protocol_pb2_dir = os.path.join(script_dir, "grpc_protocol_pb2")
if grpc_protocol_pb2_dir not in sys.path:
    sys.path.append(grpc_protocol_pb2_dir)
import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc  # Execute grpc_protocol_pb2/generate.py to generate these modules.


MAX_SESSIONS_FOR_USER = 2
POSTGRES_SERVER_PORT = 5433
MYSQL_SERVER_PORT = 9001
@ -20,22 +26,8 @@ GRPC_PORT = 9100
TEST_USER = "test_user"
TEST_PASSWORD = "123"

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"

# Use grpcio-tools to generate *pb2.py files from *.proto.
proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    f"python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} {proto_dir}/clickhouse_grpc.proto",
    shell=True,
)

sys.path.append(gen_dir)

import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
tests/integration/test_server_reload/grpc_protocol_pb2 (symbolic link)
@ -0,0 +1 @@
../../../utils/grpc-client/pb2
@ -1 +0,0 @@
../../../../src/Server/grpc_protos/clickhouse_grpc.proto
@ -22,6 +22,13 @@ from pathlib import Path
from requests.exceptions import ConnectionError
from urllib3.util.retry import Retry

script_dir = os.path.dirname(os.path.realpath(__file__))
grpc_protocol_pb2_dir = os.path.join(script_dir, "grpc_protocol_pb2")
if grpc_protocol_pb2_dir not in sys.path:
    sys.path.append(grpc_protocol_pb2_dir)
import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc  # Execute grpc_protocol_pb2/generate.py to generate these modules.


cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
    "instance",
@ -44,22 +51,6 @@ instance = cluster.add_instance(
LOADS_QUERY = "SELECT value FROM system.events WHERE event = 'MainConfigLoads'"


# Use grpcio-tools to generate *pb2.py files from *.proto.

proto_dir = Path(__file__).parent / "protos"
gen_dir = Path(__file__).parent / "_gen"
gen_dir.mkdir(exist_ok=True)
run_and_check(
    f"python3 -m grpc_tools.protoc -I{proto_dir!s} --python_out={gen_dir!s} --grpc_python_out={gen_dir!s} \
    {proto_dir!s}/clickhouse_grpc.proto",
    shell=True,
)

sys.path.append(str(gen_dir))
import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc


@pytest.fixture(name="cluster", scope="module")
def fixture_cluster():
    try:
tests/integration/test_session_log/grpc_protocol_pb2 (symbolic link)
@ -0,0 +1 @@
../../../utils/grpc-client/pb2
@ -1 +0,0 @@
../../../../src/Server/grpc_protos/clickhouse_grpc.proto
@ -8,27 +8,19 @@ import threading

from helpers.cluster import ClickHouseCluster, run_and_check

script_dir = os.path.dirname(os.path.realpath(__file__))
grpc_protocol_pb2_dir = os.path.join(script_dir, "grpc_protocol_pb2")
if grpc_protocol_pb2_dir not in sys.path:
    sys.path.append(grpc_protocol_pb2_dir)
import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc  # Execute grpc_protocol_pb2/generate.py to generate these modules.


POSTGRES_SERVER_PORT = 5433
MYSQL_SERVER_PORT = 9001
GRPC_PORT = 9100
SESSION_LOG_MATCHING_FIELDS = "auth_id, auth_type, client_version_major, client_version_minor, client_version_patch, interface"

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"

# Use grpcio-tools to generate *pb2.py files from *.proto.
proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    f"python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} {proto_dir}/clickhouse_grpc.proto",
    shell=True,
)

sys.path.append(gen_dir)

import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
@ -22,34 +22,36 @@ referential_constraints
schemata
tables
views
-- information_schema.schemata
INFORMATION_SCHEMA INFORMATION_SCHEMA default \N \N \N \N INFORMATION_SCHEMA INFORMATION_SCHEMA default \N \N \N \N
information_schema information_schema default \N \N \N \N information_schema information_schema default \N \N \N \N
default default kcu BASE TABLE utf8mb4_0900_ai_ci default default kcu BASE TABLE utf8mb4_0900_ai_ci
default default kcu2 BASE TABLE utf8mb4_0900_ai_ci default default kcu2 BASE TABLE utf8mb4_0900_ai_ci
default default mv VIEW utf8mb4_0900_ai_ci default default mv VIEW utf8mb4_0900_ai_ci
default default t FOREIGN TABLE utf8mb4_0900_ai_ci default default t FOREIGN TABLE utf8mb4_0900_ai_ci
default default v VIEW utf8mb4_0900_ai_ci default default v VIEW utf8mb4_0900_ai_ci
tmp LOCAL TEMPORARY utf8mb4_0900_ai_ci tmp LOCAL TEMPORARY utf8mb4_0900_ai_ci
-- information_schema.tables
default default kcu1 BASE TABLE 0 utf8mb4_0900_ai_ci default default kcu1 BASE TABLE 0 utf8mb4_0900_ai_ci
default default kcu2 BASE TABLE 0 utf8mb4_0900_ai_ci default default kcu2 BASE TABLE 0 utf8mb4_0900_ai_ci
default default mv VIEW 0 utf8mb4_0900_ai_ci default default mv VIEW 0 utf8mb4_0900_ai_ci
default default t FOREIGN TABLE 0 utf8mb4_0900_ai_ci default default t FOREIGN TABLE 0 utf8mb4_0900_ai_ci
default default v VIEW \N utf8mb4_0900_ai_ci default default v VIEW \N utf8mb4_0900_ai_ci
-- information_schema.views
default default mv SELECT * FROM system.one NONE NO YES NO NO NO default default mv SELECT * FROM system.one NONE NO YES NO NO NO
default default v SELECT n, f FROM default.t NONE NO NO NO NO NO default default v SELECT n, f FROM default.t NONE NO NO NO NO NO
default default kcu i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32 default default kcu i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32
default default kcu s 2 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String default default kcu s 2 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String
default default kcu2 i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32 default default kcu2 i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32
-- information_schema.columns
default default kcu1 i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32 default default kcu1 i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32
default default kcu1 s 2 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String default default kcu1 s 2 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String
default default kcu2 d 2 0 Date \N \N \N \N \N 0 \N \N \N \N \N \N \N \N \N Date default default kcu2 d 2 0 Date \N \N \N \N \N 0 \N \N \N \N \N \N \N \N \N Date
default default kcu2 i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32 default default kcu2 i 1 0 UInt32 \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N UInt32
default default kcu2 u 3 0 UUID \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N UUID default default kcu2 u 3 0 UUID \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N UUID
default default mv dummy 1 0 UInt8 \N \N 8 2 0 \N \N \N \N \N \N \N \N \N \N UInt8 default default mv dummy 1 0 UInt8 \N \N 8 2 0 \N \N \N \N \N \N \N \N \N \N UInt8
default default t n 1 0 UInt64 \N \N 64 2 0 \N \N \N \N \N \N \N \N \N \N UInt64 default default t n 1 0 UInt64 \N \N 64 2 0 \N \N \N \N \N \N \N \N \N \N UInt64
default default t f 2 0 Float32 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N Float32 default default t f 2 0 Float32 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N Float32
default default t s 3 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String default default t s 3 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String
default default t fs 4 0 FixedString(42) 42 42 \N \N \N \N \N \N \N \N \N \N \N \N \N FixedString(42) default default t fs 4 0 FixedString(42) 42 42 \N \N \N \N \N \N \N \N \N \N \N \N \N FixedString(42)
default default t d 5 0 Decimal(9, 6) \N \N 9 10 6 \N \N \N \N \N \N \N \N \N \N Decimal(9, 6) default default t d 5 0 Decimal(9, 6) \N \N 9 10 6 \N \N \N \N \N \N \N \N \N \N Decimal(9, 6)
default default v n 1 1 Nullable(Int32) \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N Nullable(Int32) default default v n 1 1 Nullable(Int32) \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N Nullable(Int32)
default default t f 2 0 Float32 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N Float32 default default t f 2 0 Float32 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N Float32
default default t fs 4 0 FixedString(42) 42 42 \N \N \N \N \N \N \N \N \N \N \N \N \N FixedString(42) default default t fs 4 0 FixedString(42) 42 42 \N \N \N \N \N \N \N \N \N \N \N \N \N FixedString(42)
default default t n 1 0 UInt64 \N \N 64 2 0 \N \N \N \N \N \N \N \N \N \N UInt64 default default t n 1 0 UInt64 \N \N 64 2 0 \N \N \N \N \N \N \N \N \N \N UInt64
default default t s 3 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String default default t s 3 0 String \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N String
default default v f 2 0 Float64 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N Float64 default default v f 2 0 Float64 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N Float64
tmp d 1 0 Date \N \N \N \N \N 0 \N \N \N \N \N \N \N \N \N Date tmp d 1 0 Date \N \N \N \N \N 0 \N \N \N \N \N \N \N \N \N Date
tmp dt 2 0 DateTime \N \N \N \N \N 0 \N \N \N \N \N \N \N \N \N DateTime tmp dt 2 0 DateTime \N \N \N \N \N 0 \N \N \N \N \N \N \N \N \N DateTime
tmp dtms 3 0 DateTime64(3) \N \N \N \N \N 3 \N \N \N \N \N \N \N \N \N DateTime64(3) tmp dtms 3 0 DateTime64(3) \N \N \N \N \N 3 \N \N \N \N \N \N \N \N \N DateTime64(3)
1
1
def default PRIMARY def default kcu i 1 \N \N \N \N def default PRIMARY def default kcu i 1 \N \N \N \N
default default v n 1 1 Nullable(Int32) \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N Nullable(Int32) default default v n 1 1 Nullable(Int32) \N \N 32 2 0 \N \N \N \N \N \N \N \N \N \N Nullable(Int32)
-- information_schema.key_column_usage
def default PRIMARY def default kcu1 i 1 \N \N \N \N def default PRIMARY def default kcu1 i 1 \N \N \N \N
def default PRIMARY def default kcu2 d 1 \N \N \N \N def default PRIMARY def default kcu2 d 1 \N \N \N \N
def default PRIMARY def default kcu2 u 1 \N \N \N \N def default PRIMARY def default kcu2 u 1 \N \N \N \N
-- information_schema.referential_constraints
1
1
@ -1,41 +1,52 @@
-- Uppercase/lowercase are okay, mixed case isn't
SHOW TABLES FROM information_schema;
SHOW TABLES FROM INFORMATION_SCHEMA;
SHOW TABLES FROM INFORMATION_schema; -- { serverError UNKNOWN_DATABASE }

DROP TABLE IF EXISTS t;
DROP VIEW IF EXISTS v;
DROP TABLE IF EXISTS t;
DROP VIEW IF EXISTS mv;
DROP TABLE IF EXISTS tmp;
DROP TABLE IF EXISTS kcu;
DROP TABLE IF EXISTS kcu1;
DROP TABLE IF EXISTS kcu2;

CREATE TABLE t (n UInt64, f Float32, s String, fs FixedString(42), d Decimal(9, 6)) ENGINE=Memory;
CREATE TABLE t (n UInt64, f Float32, s String, fs FixedString(42), d Decimal(9, 6)) ENGINE = Memory;
CREATE VIEW v (n Nullable(Int32), f Float64) AS SELECT n, f FROM t;
CREATE MATERIALIZED VIEW mv ENGINE=Null AS SELECT * FROM system.one;
CREATE MATERIALIZED VIEW mv ENGINE = Null AS SELECT * FROM system.one;
CREATE TEMPORARY TABLE tmp (d Date, dt DateTime, dtms DateTime64(3));
CREATE TABLE kcu (i UInt32, s String) ENGINE MergeTree ORDER BY i;
CREATE TABLE kcu1 (i UInt32, s String) ENGINE MergeTree ORDER BY i;
CREATE TABLE kcu2 (i UInt32, d Date, u UUID) ENGINE MergeTree ORDER BY (u, d);

-- FIXME #28687
SELECT * FROM information_schema.schemata WHERE schema_name ilike 'information_schema';
-- SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA=currentDatabase() OR TABLE_SCHEMA='') AND TABLE_NAME NOT LIKE '%inner%';
SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (table_schema = currentDatabase() OR table_schema = '') AND table_name NOT LIKE '%inner%';
SELECT * FROM information_schema.views WHERE table_schema = currentDatabase();
-- SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE (TABLE_SCHEMA=currentDatabase() OR TABLE_SCHEMA='') AND TABLE_NAME NOT LIKE '%inner%'
SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE (table_schema = currentDatabase() OR table_schema = '') AND table_name NOT LIKE '%inner%';

SELECT '-- information_schema.schemata';
SELECT * FROM information_schema.schemata WHERE schema_name ilike 'information_schema' ORDER BY schema_name;

SELECT '-- information_schema.tables';
SELECT * FROM information_schema.tables WHERE table_schema = currentDatabase() AND table_name NOT LIKE '%inner%' ORDER BY table_name;
-- SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=currentDatabase() ORDER BY schema_name;
-- FIXME #28687

SELECT '-- information_schema.views';
SELECT * FROM information_schema.views WHERE table_schema = currentDatabase() ORDER BY table_name;
-- SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE (TABLE_SCHEMA=currentDatabase() OR TABLE_SCHEMA='') AND TABLE_NAME NOT LIKE '%inner%'

SELECT '-- information_schema.columns';
SELECT * FROM information_schema.columns WHERE table_schema = currentDatabase() AND table_name NOT LIKE '%inner%' ORDER BY table_name, column_name;

SELECT '-- information_schema.key_column_usage';
SELECT * FROM information_schema.key_column_usage WHERE table_schema = currentDatabase() AND table_name = 'kcu1' ORDER BY table_schema, column_name;
SELECT * FROM information_schema.key_column_usage WHERE table_schema = currentDatabase() AND table_name = 'kcu2' ORDER BY table_schema, column_name;

SELECT '-- information_schema.referential_constraints';
SELECT * FROM information_schema.referential_constraints;
--
-- mixed upper/lowercase schema and table name:
SELECT count() FROM information_schema.TABLES WHERE table_schema = currentDatabase() AND table_name = 't';
SELECT count() FROM INFORMATION_SCHEMA.tables WHERE table_schema = currentDatabase() AND table_name = 't';
SELECT count() FROM INFORMATION_schema.tables WHERE table_schema = currentDatabase() AND table_name = 't'; -- { serverError UNKNOWN_DATABASE }
SELECT count() FROM information_schema.taBLES WHERE table_schema =currentDatabase() AND table_name = 't'; -- { serverError UNKNOWN_TABLE }

SELECT * FROM information_schema.key_column_usage WHERE table_schema = currentDatabase() AND table_name = 'kcu';
SELECT * FROM information_schema.key_column_usage WHERE table_schema = currentDatabase() AND table_name = 'kcu2';

SELECT * FROM information_schema.referential_constraints;

drop view mv;
drop view v;
drop table t;
drop table kcu;
drop table kcu2;
DROP VIEW mv;
DROP VIEW v;
DROP TABLE t;
DROP TABLE kcu1;
DROP TABLE kcu2;
@ -283,6 +283,7 @@ CREATE TABLE system.functions
(
    `name` String,
    `is_aggregate` UInt8,
    `is_deterministic` Nullable(UInt8),
    `case_insensitive` UInt8,
    `alias_to` String,
    `create_query` String,
@ -1,6 +1,6 @@
CREATE DATABASE INFORMATION_SCHEMA\nENGINE = Memory
CREATE VIEW INFORMATION_SCHEMA.COLUMNS\n(\n `table_catalog` String,\n `table_schema` String,\n `table_name` String,\n `column_name` String,\n `ordinal_position` UInt64,\n `column_default` String,\n `is_nullable` String,\n `data_type` String,\n `character_maximum_length` Nullable(UInt64),\n `character_octet_length` Nullable(UInt64),\n `numeric_precision` Nullable(UInt64),\n `numeric_precision_radix` Nullable(UInt64),\n `numeric_scale` Nullable(UInt64),\n `datetime_precision` Nullable(UInt64),\n `character_set_catalog` Nullable(String),\n `character_set_schema` Nullable(String),\n `character_set_name` Nullable(String),\n `collation_catalog` Nullable(String),\n `collation_schema` Nullable(String),\n `collation_name` Nullable(String),\n `domain_catalog` Nullable(String),\n `domain_schema` Nullable(String),\n `domain_name` Nullable(String),\n `column_comment` String,\n `column_type` String,\n `TABLE_CATALOG` String,\n `TABLE_SCHEMA` String,\n `TABLE_NAME` String,\n `COLUMN_NAME` String,\n `ORDINAL_POSITION` UInt64,\n `COLUMN_DEFAULT` String,\n `IS_NULLABLE` String,\n `DATA_TYPE` String,\n `CHARACTER_MAXIMUM_LENGTH` Nullable(UInt64),\n `CHARACTER_OCTET_LENGTH` Nullable(UInt64),\n `NUMERIC_PRECISION` Nullable(UInt64),\n `NUMERIC_PRECISION_RADIX` Nullable(UInt64),\n `NUMERIC_SCALE` Nullable(UInt64),\n `DATETIME_PRECISION` Nullable(UInt64),\n `CHARACTER_SET_CATALOG` Nullable(String),\n `CHARACTER_SET_SCHEMA` Nullable(String),\n `CHARACTER_SET_NAME` Nullable(String),\n `COLLATION_CATALOG` Nullable(String),\n `COLLATION_SCHEMA` Nullable(String),\n `COLLATION_NAME` Nullable(String),\n `DOMAIN_CATALOG` Nullable(String),\n `DOMAIN_SCHEMA` Nullable(String),\n `DOMAIN_NAME` Nullable(String),\n `COLUMN_COMMENT` String,\n `COLUMN_TYPE` String\n) AS\nSELECT\n database AS table_catalog,\n database AS table_schema,\n table AS table_name,\n name AS column_name,\n position AS ordinal_position,\n default_expression AS column_default,\n type LIKE \'Nullable(%)\' AS is_nullable,\n type AS data_type,\n character_octet_length AS character_maximum_length,\n character_octet_length,\n numeric_precision,\n numeric_precision_radix,\n numeric_scale,\n datetime_precision,\n NULL AS character_set_catalog,\n NULL AS character_set_schema,\n NULL AS character_set_name,\n NULL AS collation_catalog,\n NULL AS collation_schema,\n NULL AS collation_name,\n NULL AS domain_catalog,\n NULL AS domain_schema,\n NULL AS domain_name,\n comment AS column_comment,\n type AS column_type,\n table_catalog AS TABLE_CATALOG,\n table_schema AS TABLE_SCHEMA,\n table_name AS TABLE_NAME,\n column_name AS COLUMN_NAME,\n ordinal_position AS ORDINAL_POSITION,\n column_default AS COLUMN_DEFAULT,\n is_nullable AS IS_NULLABLE,\n data_type AS DATA_TYPE,\n character_maximum_length AS CHARACTER_MAXIMUM_LENGTH,\n character_octet_length AS CHARACTER_OCTET_LENGTH,\n numeric_precision AS NUMERIC_PRECISION,\n numeric_precision_radix AS NUMERIC_PRECISION_RADIX,\n numeric_scale AS NUMERIC_SCALE,\n datetime_precision AS DATETIME_PRECISION,\n character_set_catalog AS CHARACTER_SET_CATALOG,\n character_set_schema AS CHARACTER_SET_SCHEMA,\n character_set_name AS CHARACTER_SET_NAME,\n collation_catalog AS COLLATION_CATALOG,\n collation_schema AS COLLATION_SCHEMA,\n collation_name AS COLLATION_NAME,\n domain_catalog AS DOMAIN_CATALOG,\n domain_schema AS DOMAIN_SCHEMA,\n domain_name AS DOMAIN_NAME,\n column_comment AS COLUMN_COMMENT,\n column_type AS COLUMN_TYPE\nFROM system.columns
CREATE VIEW INFORMATION_SCHEMA.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
CREATE VIEW INFORMATION_SCHEMA.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
CREATE VIEW information_schema.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
CREATE VIEW information_schema.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
CREATE VIEW INFORMATION_SCHEMA.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
CREATE VIEW INFORMATION_SCHEMA.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
CREATE VIEW information_schema.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
CREATE VIEW information_schema.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables
@ -1,18 +0,0 @@
status before reload
status after reload
NOT_LOADED 0
LOADED 0
FAILED 1
LOADING 0
FAILED_AND_RELOADING 0
LOADED_AND_RELOADING 0
NOT_EXIST 0
status after reload, table exists
NOT_LOADED 0
LOADED 1
FAILED 0
LOADING 0
FAILED_AND_RELOADING 0
LOADED_AND_RELOADING 0
NOT_EXIST 0
status after drop
@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Tags: no-ordinary-database

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

function get_dictionary_status()
{
    local name=$1 && shift
    $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL_PROMETHEUS" | {
        awk -F'[{}=," ]' -vname="$name" '/ClickHouseStatusInfo_DictionaryStatus{/ && $(NF-3) == name { print $4, $NF }'
    }
}

$CLICKHOUSE_CLIENT -q "CREATE DICTIONARY dict (key Int, value String) PRIMARY KEY key SOURCE(CLICKHOUSE(TABLE data)) LAYOUT(HASHED()) LIFETIME(0)"
uuid="$($CLICKHOUSE_CLIENT -q "SELECT uuid FROM system.dictionaries WHERE database = '$CLICKHOUSE_DATABASE' AND name = 'dict'")"

echo 'status before reload'
get_dictionary_status "$uuid"

# source table does not exist
# NOTE: when the dictionary does not exist, it produces a BAD_ARGUMENTS error, so using UNKNOWN_TABLE is safe
$CLICKHOUSE_CLIENT -n -q "SYSTEM RELOAD DICTIONARY dict -- { serverError UNKNOWN_TABLE }"
echo 'status after reload'
get_dictionary_status "$uuid"

# create source
$CLICKHOUSE_CLIENT -q "CREATE TABLE data (key Int, value String) Engine=Null"
$CLICKHOUSE_CLIENT -q "SYSTEM RELOAD DICTIONARY dict"
echo 'status after reload, table exists'
get_dictionary_status "$uuid"

# remove dictionary
$CLICKHOUSE_CLIENT -q "DROP DICTIONARY dict"
$CLICKHOUSE_CLIENT -q "DROP TABLE data"
echo 'status after drop'
get_dictionary_status "$uuid"
@ -53,4 +53,9 @@ expect eof
EOF
}

run "$CLICKHOUSE_LOCAL"
run "$CLICKHOUSE_LOCAL --disable_suggestion"
# Suggestions are off because the suggestion feature initializes itself by reading all available function
# names from "system.functions". Getting the value for field "is_obsolete" occasionally throws (e.g. for
# certain dictionary functions when dictionaries are not set up yet). Exceptions are properly handled, but
# they exist for a short time. This, in combination with CLICKHOUSE_TERMINATE_ON_ANY_EXCEPTION, terminates
# clickhouse-local and clickhouse-client when run in interactive mode *with* suggestions.
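For context, system.functions is an ordinary system table, so the data the suggestion feature bootstraps from can be inspected directly. A hedged sketch of fetching the same function names over the HTTP interface; the host, port, and use of the `requests` package are assumptions for illustration, not part of this test:

```python
import requests

# Assumes a local server on the default HTTP port 8123.
resp = requests.get(
    "http://localhost:8123/",
    params={"query": "SELECT name FROM system.functions LIMIT 10"},
)
resp.raise_for_status()
print(resp.text)
```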
@ -1,4 +1,25 @@
const arguments byteHammingDistance
0
const arguments byteEditDistance
6
const arguments byteJaccardIndex
0.4
byteHammingDistance
1
7
7
10
byteHammingDistance(const, non const)
3
6
3
10
byteHammingDistance(non const, const)
6
6
3
10
mismatches(alias)
1
7
7
@ -11,26 +32,17 @@
6
3
10
1
7
7
10
3
6
3
10
6
6
3
10
byteJaccardIndex
0.8571428571428571
0.8571428571428571
0
0.4
byteEditDistance
1
1
7
6
byteLevenshteinDistance
1
1
7
@ -1,4 +1,10 @@
select 'const arguments byteHammingDistance';
select byteHammingDistance('abcd', 'abcd');
select 'const arguments byteEditDistance';
select byteEditDistance('clickhouse', 'mouse');
select 'const arguments byteJaccardIndex';
select byteJaccardIndex('clickhouse', 'mouse');

drop table if exists t;
create table t
(
@ -7,18 +13,25 @@ create table t
) engine = MergeTree order by s1;

insert into t values ('abcdefg', 'abcdef') ('abcdefg', 'bcdefg') ('abcdefg', '') ('mouse', 'clickhouse');
select 'byteHammingDistance';
select byteHammingDistance(s1, s2) from t;
select 'byteHammingDistance(const, non const)';
select byteHammingDistance('abc', s2) from t;
select 'byteHammingDistance(non const, const)';
select byteHammingDistance(s2, 'def') from t;

select 'mismatches(alias)';
select mismatches(s1, s2) from t;
select mismatches('abc', s2) from t;
select mismatches(s2, 'def') from t;

select 'byteJaccardIndex';
select byteJaccardIndex(s1, s2) from t;
select 'byteEditDistance';
select byteEditDistance(s1, s2) from t;
select 'byteLevenshteinDistance';
select byteLevenshteinDistance(s1, s2) from t;

SELECT byteEditDistance(randomString(power(2, 17)), 'abc'); -- { serverError 131 }
SELECT byteEditDistance(randomString(power(2, 17)), 'abc'); -- { serverError TOO_LARGE_STRING_SIZE}

drop table t;
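The reference values above pin down the semantics of the two simpler functions. A minimal Python sketch that reproduces them, assuming byteHammingDistance counts position-wise byte mismatches over the common prefix plus the length difference, and byteJaccardIndex compares the sets of distinct byte values; both assumptions are consistent with every expected value in the reference file:

```python
def byte_hamming_distance(a: bytes, b: bytes) -> int:
    # Mismatches position by position, plus the unmatched tail of the longer input.
    return sum(x != y for x, y in zip(a, b)) + abs(len(a) - len(b))


def byte_jaccard_index(a: bytes, b: bytes) -> float:
    # |intersection| / |union| over the sets of distinct byte values.
    sa, sb = set(a), set(b)
    return len(sa & sb) / len(sa | sb)


# Spot checks against the reference output above.
assert byte_hamming_distance(b"mouse", b"clickhouse") == 10
assert byte_hamming_distance(b"abcdefg", b"abcdef") == 1
assert byte_jaccard_index(b"clickhouse", b"mouse") == 0.4
```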
@ -0,0 +1,24 @@
-9223372036854775808
9223372036854775806
-9223372036854775808
9223372036854775806
-9223372036854775808
9223372036854775806
-9223372036854775808
9223372036854775806
[2,65537,-9223372036854775808]
[9223372036854775806]
[2,65537,-9223372036854775808]
[9223372036854775806]
[2,65537,-9223372036854775808]
[9223372036854775806]
[2,65537,-9223372036854775808]
[9223372036854775806]
[[2,65537,-9223372036854775808]]
[[9223372036854775806]]
[[2,65537,-9223372036854775808]]
[[9223372036854775806]]
[[2,65537,-9223372036854775808]]
[[9223372036854775806]]
[[2,65537,-9223372036854775808]]
[[9223372036854775806]]
@ -0,0 +1,13 @@
SELECT if(number % 2, 9223372036854775806, -9223372036854775808) AS res FROM numbers(2);
SELECT if(number % 2, materialize(9223372036854775806), -9223372036854775808) AS res FROM numbers(2);
SELECT if(number % 2, 9223372036854775806, materialize(-9223372036854775808)) AS res FROM numbers(2);
SELECT if(number % 2, materialize(9223372036854775806), materialize(-9223372036854775808)) AS res FROM numbers(2);
SELECT if(number % 2, [9223372036854775806], [2, 65537, -9223372036854775808]) AS res FROM numbers(2);
SELECT if(number % 2, materialize([9223372036854775806]), [2, 65537, -9223372036854775808]) AS res FROM numbers(2);
SELECT if(number % 2, [9223372036854775806], materialize([2, 65537, -9223372036854775808])) AS res FROM numbers(2);
SELECT if(number % 2, materialize([9223372036854775806]), materialize([2, 65537, -9223372036854775808])) AS res FROM numbers(2);
SELECT if(number % 2, [[9223372036854775806]], [[2, 65537, -9223372036854775808]]) AS res FROM numbers(2);
SELECT if(number % 2, materialize([[9223372036854775806]]), [[2, 65537, -9223372036854775808]]) AS res FROM numbers(2);
SELECT if(number % 2, [[9223372036854775806]], materialize([[2, 65537, -9223372036854775808]])) AS res FROM numbers(2);
SELECT if(number % 2, materialize([[9223372036854775806]]), materialize([[2, 65537, -9223372036854775808]])) AS res FROM numbers(2);
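The constants used throughout this test are exactly the Int64 boundary values, which is what makes the type deduction inside `if` worth pinning down; a quick arithmetic check:

```python
# 9223372036854775806 is Int64 max minus one; -9223372036854775808 is Int64 min.
assert 2**63 - 2 == 9223372036854775806
assert -(2**63) == -9223372036854775808
```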
@ -12,12 +12,16 @@ tmp=$(mktemp)
# in the repo except the contrib directory.
find_cmd=(
    find "$GIT_ROOT" -type f -not -path "${GIT_ROOT}contrib/*"
    \(
        \(
            -name '*.py' -or -name "*.python" -or
            \(
                -executable -not -name "*.*" -exec sh -c 'file {} | grep -q "Python script"' \;
            \)
        \)
        # We skip modules generated by the protocol buffer compiler from *.proto files.
        -and -not -name '*_pb2.py' -and -not -name '*_pb2_grpc.py'
    \)
)

if ! "${find_cmd[@]}" -exec black --check --diff {} + 1>"$tmp" 2>&1; then
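For readers less fluent in `find` expressions, the same selection logic can be written out in Python. A sketch under the script's assumptions (repository root passed in as `git_root`, the external `file` utility available for classifying extension-less executables); it is illustrative, not a drop-in replacement:

```python
import os
import subprocess


def python_files(git_root):
    # Yields the files the find command above would hand to black.
    for dirpath, _dirnames, filenames in os.walk(git_root):
        rel = os.path.relpath(dirpath, git_root)
        # -not -path "${GIT_ROOT}contrib/*": skip the top-level contrib tree.
        if rel == "contrib" or rel.startswith("contrib" + os.sep):
            continue
        for name in filenames:
            path = os.path.join(dirpath, name)
            # Skip modules generated by the protocol buffer compiler.
            if name.endswith(("_pb2.py", "_pb2_grpc.py")):
                continue
            if name.endswith((".py", ".python")):
                yield path
            # Executables without an extension that `file` identifies as Python.
            elif "." not in name and os.access(path, os.X_OK):
                out = subprocess.run(["file", path], capture_output=True, text=True)
                if "Python script" in out.stdout:
                    yield path
```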
@ -10,10 +10,6 @@
# Most of the command line options are the same, for more information type
# ./clickhouse_grpc_client.py --help

import grpc  # pip3 install grpcio
import grpc_tools  # pip3 install grpcio-tools
import argparse, cmd, os, signal, subprocess, sys, threading, time, uuid

DEFAULT_HOST = "localhost"
DEFAULT_PORT = 9100
DEFAULT_USER_NAME = "default"
@ -24,6 +20,16 @@ STDIN_BUFFER_SIZE = 1048576
DEFAULT_ENCODING = "utf-8"


import grpc  # pip3 install grpcio
import argparse, cmd, os, signal, sys, threading, time, uuid

script_dir = os.path.dirname(os.path.realpath(__file__))
pb2_dir = os.path.join(script_dir, "pb2")
if pb2_dir not in sys.path:
    sys.path.append(pb2_dir)
import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc  # Execute pb2/generate.py to generate these modules.


class ClickHouseGRPCError(Exception):
    pass

@ -80,8 +86,6 @@ class ClickHouseGRPCClient(cmd.Cmd):
        self.session_id = None

    def __enter__(self):
        ClickHouseGRPCClient.__generate_pb2()
        ClickHouseGRPCClient.__import_pb2()
        self.__connect()
        return self

@ -229,40 +233,6 @@ class ClickHouseGRPCClient(cmd.Cmd):
        if result.HasField("exception"):
            raise ClickHouseGRPCError(result.exception.display_text)

    # Use grpcio-tools to generate *pb2.py files from *.proto.
    @staticmethod
    def __generate_pb2():
        script_dir = os.path.dirname(os.path.realpath(__file__))
        proto_dir = os.path.join(script_dir, "./protos")
        gen_dir = os.path.join(script_dir, "./_gen")
        if os.path.exists(os.path.join(gen_dir, "clickhouse_grpc_pb2_grpc.py")):
            return
        os.makedirs(gen_dir, exist_ok=True)
        cmd = [
            "python3",
            "-m",
            "grpc_tools.protoc",
            "-I" + proto_dir,
            "--python_out=" + gen_dir,
            "--grpc_python_out=" + gen_dir,
            proto_dir + "/clickhouse_grpc.proto",
        ]
        p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        # We don't want to show grpc_tools warnings.
        errors = p.stderr.read().decode().strip("\n").split("\n")
        only_warnings = all(("Warning" in error) for error in errors)
        if not only_warnings:
            error_print("\n".join(errors))

    # Import the generated *pb2.py files.
    @staticmethod
    def __import_pb2():
        script_dir = os.path.dirname(os.path.realpath(__file__))
        gen_dir = os.path.join(script_dir, "./_gen")
        sys.path.append(gen_dir)
        global clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc
        import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc

    # Prints only if interactive mode is activated.
    def verbatim_print(self, *args, **kwargs):
        if self.verbatim:
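Note the design shift in this client: `grpc_tools` is no longer imported, and the removed `__generate_pb2`/`__import_pb2` helpers above are gone. The client now simply imports the checked-in modules from `pb2/`, so `grpcio-tools` stops being a runtime dependency and is only needed when the `.proto` file changes and the modules are regenerated.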
utils/grpc-client/pb2/clickhouse_grpc_pb2.py (new file, 163 lines)
File diff suppressed because one or more lines are too long
utils/grpc-client/pb2/clickhouse_grpc_pb2_grpc.py (new file, 165 lines)
@ -0,0 +1,165 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

import clickhouse_grpc_pb2 as clickhouse__grpc__pb2


class ClickHouseStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.ExecuteQuery = channel.unary_unary(
            '/clickhouse.grpc.ClickHouse/ExecuteQuery',
            request_serializer=clickhouse__grpc__pb2.QueryInfo.SerializeToString,
            response_deserializer=clickhouse__grpc__pb2.Result.FromString,
        )
        self.ExecuteQueryWithStreamInput = channel.stream_unary(
            '/clickhouse.grpc.ClickHouse/ExecuteQueryWithStreamInput',
            request_serializer=clickhouse__grpc__pb2.QueryInfo.SerializeToString,
            response_deserializer=clickhouse__grpc__pb2.Result.FromString,
        )
        self.ExecuteQueryWithStreamOutput = channel.unary_stream(
            '/clickhouse.grpc.ClickHouse/ExecuteQueryWithStreamOutput',
            request_serializer=clickhouse__grpc__pb2.QueryInfo.SerializeToString,
            response_deserializer=clickhouse__grpc__pb2.Result.FromString,
        )
        self.ExecuteQueryWithStreamIO = channel.stream_stream(
            '/clickhouse.grpc.ClickHouse/ExecuteQueryWithStreamIO',
            request_serializer=clickhouse__grpc__pb2.QueryInfo.SerializeToString,
            response_deserializer=clickhouse__grpc__pb2.Result.FromString,
        )


class ClickHouseServicer(object):
    """Missing associated documentation comment in .proto file."""

    def ExecuteQuery(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ExecuteQueryWithStreamInput(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ExecuteQueryWithStreamOutput(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ExecuteQueryWithStreamIO(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_ClickHouseServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'ExecuteQuery': grpc.unary_unary_rpc_method_handler(
            servicer.ExecuteQuery,
            request_deserializer=clickhouse__grpc__pb2.QueryInfo.FromString,
            response_serializer=clickhouse__grpc__pb2.Result.SerializeToString,
        ),
        'ExecuteQueryWithStreamInput': grpc.stream_unary_rpc_method_handler(
            servicer.ExecuteQueryWithStreamInput,
            request_deserializer=clickhouse__grpc__pb2.QueryInfo.FromString,
            response_serializer=clickhouse__grpc__pb2.Result.SerializeToString,
        ),
        'ExecuteQueryWithStreamOutput': grpc.unary_stream_rpc_method_handler(
            servicer.ExecuteQueryWithStreamOutput,
            request_deserializer=clickhouse__grpc__pb2.QueryInfo.FromString,
            response_serializer=clickhouse__grpc__pb2.Result.SerializeToString,
        ),
        'ExecuteQueryWithStreamIO': grpc.stream_stream_rpc_method_handler(
            servicer.ExecuteQueryWithStreamIO,
            request_deserializer=clickhouse__grpc__pb2.QueryInfo.FromString,
            response_serializer=clickhouse__grpc__pb2.Result.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'clickhouse.grpc.ClickHouse', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class ClickHouse(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def ExecuteQuery(request,
                     target,
                     options=(),
                     channel_credentials=None,
                     call_credentials=None,
                     insecure=False,
                     compression=None,
                     wait_for_ready=None,
                     timeout=None,
                     metadata=None):
        return grpc.experimental.unary_unary(request, target, '/clickhouse.grpc.ClickHouse/ExecuteQuery',
                                             clickhouse__grpc__pb2.QueryInfo.SerializeToString,
                                             clickhouse__grpc__pb2.Result.FromString,
                                             options, channel_credentials,
                                             insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ExecuteQueryWithStreamInput(request_iterator,
                                    target,
                                    options=(),
                                    channel_credentials=None,
                                    call_credentials=None,
                                    insecure=False,
                                    compression=None,
                                    wait_for_ready=None,
                                    timeout=None,
                                    metadata=None):
        return grpc.experimental.stream_unary(request_iterator, target, '/clickhouse.grpc.ClickHouse/ExecuteQueryWithStreamInput',
                                              clickhouse__grpc__pb2.QueryInfo.SerializeToString,
                                              clickhouse__grpc__pb2.Result.FromString,
                                              options, channel_credentials,
                                              insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ExecuteQueryWithStreamOutput(request,
                                     target,
                                     options=(),
                                     channel_credentials=None,
                                     call_credentials=None,
                                     insecure=False,
                                     compression=None,
                                     wait_for_ready=None,
                                     timeout=None,
                                     metadata=None):
        return grpc.experimental.unary_stream(request, target, '/clickhouse.grpc.ClickHouse/ExecuteQueryWithStreamOutput',
                                              clickhouse__grpc__pb2.QueryInfo.SerializeToString,
                                              clickhouse__grpc__pb2.Result.FromString,
                                              options, channel_credentials,
                                              insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ExecuteQueryWithStreamIO(request_iterator,
                                 target,
                                 options=(),
                                 channel_credentials=None,
                                 call_credentials=None,
                                 insecure=False,
                                 compression=None,
                                 wait_for_ready=None,
                                 timeout=None,
                                 metadata=None):
        return grpc.experimental.stream_stream(request_iterator, target, '/clickhouse.grpc.ClickHouse/ExecuteQueryWithStreamIO',
                                               clickhouse__grpc__pb2.QueryInfo.SerializeToString,
                                               clickhouse__grpc__pb2.Result.FromString,
                                               options, channel_credentials,
                                               insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
utils/grpc-client/pb2/generate.py (new executable file, 29 lines)
@ -0,0 +1,29 @@
#!/usr/bin/env python3

import grpc_tools  # pip3 install grpcio-tools

import os
import subprocess


script_dir = os.path.dirname(os.path.realpath(__file__))
dest_dir = script_dir
src_dir = os.path.abspath(os.path.join(script_dir, "../../../src/Server/grpc_protos"))
src_filename = "clickhouse_grpc.proto"


def generate():
    cmd = [
        "python3",
        "-m",
        "grpc_tools.protoc",
        "-I" + src_dir,
        "--python_out=" + dest_dir,
        "--grpc_python_out=" + dest_dir,
        os.path.join(src_dir, src_filename),
    ]
    subprocess.run(cmd)


if __name__ == "__main__":
    generate()
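To refresh the checked-in modules after editing `clickhouse_grpc.proto`, re-run this script (it needs `grpcio-tools` installed, as the import comment notes); the `grpc_protocol_pb2` symlinks under `tests/integration/` then pick up the regenerated files automatically.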
@ -1 +0,0 @@
../../../src/Server/grpc_protos/clickhouse_grpc.proto